--- /dev/null
+[GitCommit]
+bears = GitCommitBear
+ignore_length_regex = Signed-off-by,
+ Also-by,
+ Co-authored-by,
+ http://,
+ https://
+
+[JSON]
+bears = JSONFormatBear
+files = **/*.json
+ignore = .*/**
+indent_size = 2
+
+[YAML]
+bears = YAMLLintBear
+files = jjb/**/*.yaml,openstack-hot/**/*.yaml
+document_start = True
+yamllint_config = yamllint.conf
+
+[ShellCheck]
+bears = ShellCheckBear,SpaceConsistencyBear
+files = jenkins-scripts/**/*.sh,
+ jjb/**/*.sh,
+ scripts/**/*.sh
+ignore = jenkins-scripts/*-local-env.sh,
+ jjb/integration/*.sh
+shell = bash
+indent_size = 4
+use_spaces = True
* CentOS 7 - devstack - 20170221-1719
* CentOS 7 - devstack - newton - 20170117-0005
* CentOS 7 - devstack-mitaka - 20170130-0523
+* CentOS 7 - devstack-mitaka - 20170314-2255
* CentOS 7 - devstack-newton - 20170130-0426
+* CentOS 7 - devstack-newton - 20170314-2256
* CentOS 7 - docker - 20170117-0003
* CentOS 7 - docker - 20170120-1434
* CentOS 7 - java-builder - 20170117-0004
* CentOS 7 - java-builder - 20170126-0058
+* CentOS 7 - java-builder - 20170309-2355
+* CentOS 7 - java-builder - 20170311-0517
* CentOS 7 - robot - 20170117-0004
* CentOS 7 - robot - 20170210-1803
* Fedora 23 (20151030 cloud)
* Ubuntu 14.04 - mininet-ovs-25 - 20170130-0425
* Ubuntu 14.04 - mininet-ovs-25 - 20170210-0300
* Ubuntu 14.04 LTS Trusty Tahr (cloudimg)
+* Ubuntu 16.04 - gbp - 20170308-0321
+* Ubuntu 16.04 - mininet-ovs-25 - 20170308-0230
* Ubuntu 16.04 LTS (2016-05-03 cloudimg)
</tr>
<tr>
<td colspan="4">
- A CentOS 7 build minion. This system has OpenJDK 1.7 (Java7) and OpenJDK
- 1.8 (Java8) installed on it along with all the other components and
- libraries needed for building any current OpenDaylight project. This is
- the label that is used for all basic verify, merge and daily builds for
+ CentOS 7 build minion configured with OpenJDK 1.7 (Java7) and OpenJDK
+ 1.8 (Java8) along with all the other components and libraries needed
+ for building any current OpenDaylight project. This is the label that
+ is used for all basic verify, merge and daily builds for
projects.
</td>
</tr>
</tr>
<tr>
<td colspan="4">
- A CentOS 7 minion that is configured with OpenJDK 1.7 (Java7), OpenJDK
+ CentOS 7 minion configured with OpenJDK 1.7 (Java7), OpenJDK
1.8 (Java8) and all the current packages used by the integration
project for doing robot driven jobs. If you are executing robot
framework jobs then your job should be using this as the minion that
</tr>
<tr class="warning">
- <td><b>Jenkins Labels</b><br/> ubuntu-trusty-mininet-2c-2g</td>
- <td><b>Minion Template names</b><br/> ubuntu-trusty-mininet-2c-2g</td>
+ <td><b>Jenkins Labels</b><br/> ubuntu1404-mininet-2c-2g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1404-mininet-2c-2g</td>
<td><b>Packer Template</b><br/>
          releng/builder/packer/templates/mininet.json</td>
<td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/mininet-ubuntu.sh</td>
</tr>
<tr>
<td colspan="4">
- Basic Ubuntu system with ovs 2.0.2 and mininet 2.1.0
+ Basic Ubuntu 14.04 (Trusty) system with ovs 2.0.2 and mininet 2.1.0
</td>
</tr>
<tr class="warning">
- <td><b>Jenkins Labels</b><br/> ubuntu-trusty-mininet-ovs-23-2c-2g</td>
- <td><b>Minion Template names</b><br/> ubuntu-trusty-mininet-ovs-23-2c-2g</td>
+ <td><b>Jenkins Labels</b><br/> ubuntu1404-mininet-ovs-23-2c-2g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1404-mininet-ovs-23-2c-2g</td>
<td><b>Packer Template</b><br/> releng/builder/packer/templates/mininet-ovs-2.3.json</td>
<td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/mininet-ubuntu.sh</td>
</tr>
<tr>
<td colspan="4">
- Basic Ubuntu system with ovs 2.3 and mininet 2.2.1
+ Ubuntu 14.04 (Trusty) system with ovs 2.3 and mininet 2.2.1
</td>
</tr>
<tr class="warning">
- <td><b>Jenkins Labels</b><br/> ubuntu-trusty-mininet-ovs-25-2c-2g</td>
- <td><b>Minion Template names</b><br/> ubuntu-trusty-mininet-ovs-25-2c-2g</td>
+ <td><b>Jenkins Labels</b><br/> ubuntu1404-mininet-ovs-25-2c-2g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1404-mininet-ovs-25-2c-2g</td>
<td><b>Packer Template</b><br/> releng/builder/packer/templates/mininet-ovs-2.5.json</td>
<td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/mininet-ubuntu.sh</td>
</tr>
<tr>
<td colspan="4">
- Basic Ubuntu system with ovs 2.5 and mininet 2.2.2
+ Ubuntu 14.04 (Trusty) system with ovs 2.5 and mininet 2.2.2
+ </td>
+ </tr>
+
+ <tr class="warning">
+ <td><b>Jenkins Labels</b><br/> ubuntu1604-mininet-ovs-25-2c-4g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1604-mininet-ovs-25-2c-4g</td>
+ <td><b>Packer Template</b><br/> releng/builder/packer/templates/mininet-ovs-2.5.json</td>
+ <td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/mininet-ubuntu.sh</td>
+ </tr>
+ <tr>
+ <td colspan="4">
+ Ubuntu 16.04 (Xenial) system with ovs 2.5 and mininet 2.2.1
</td>
</tr>
</tr>
<tr>
<td colspan="4">
- A CentOS 7 system purpose built for doing OpenStack testing using
+ CentOS 7 system purpose built for doing OpenStack testing using
DevStack. This minion is primarily targeted at the needs of the OVSDB
project. It has OpenJDK 1.7 (aka Java7) and OpenJDK 1.8 (Java8) and
other basic DevStack related bits installed.
</tr>
<tr>
<td colspan="4">
- A CentOS 7 system that is configured with OpenJDK 1.7 (aka Java7),
+ CentOS 7 system configured with OpenJDK 1.7 (aka Java7),
OpenJDK 1.8 (Java8) and Docker. This system was originally custom
built for the test needs of the OVSDB project but other projects have
expressed interest in using it.
</tr>
<tr class="warning">
- <td><b>Jenkins Labels</b><br/> ubuntu-trusty-gbp-2c-2g</td>
- <td><b>Minion Template names</b><br/> ubuntu-trusty-gbp-2c-2g</td>
+ <td><b>Jenkins Labels</b><br/> ubuntu1404-gbp-2c-2g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1404-gbp-2c-2g</td>
<td><b>Packer Template</b><br/> releng/builder/packer/templates/gbp.json</td>
<td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/ubuntu-docker-ovs.sh</td>
</tr>
<tr>
<td colspan="4">
- A basic Ubuntu node with latest OVS and docker installed. Used by Group Based Policy.
+ Ubuntu 14.04 (Trusty) node with latest OVS and docker installed. Used by Group Based Policy.
</td>
</tr>
+
+ <tr class="warning">
+ <td><b>Jenkins Labels</b><br/> ubuntu1604-gbp-2c-4g</td>
+ <td><b>Minion Template names</b><br/> ubuntu1604-gbp-2c-4g</td>
+ <td><b>Packer Template</b><br/> releng/builder/packer/templates/gbp.json</td>
+ <td><b>Spinup Script</b><br/> releng/builder/jenkins-scripts/ubuntu-docker-ovs.sh</td>
+ </tr>
+ <tr>
+ <td colspan="4">
+ Ubuntu 16.04 (Xenial) node with latest OVS and docker installed. Used by Group Based Policy.
+ </td>
+ </tr>
+
</table>
Pool: ODLPUB - HOT (Heat Orchestration Templates)
useradd -m -s /bin/bash jenkins
# Check if docker group exists
-grep -q docker /etc/group
-if [ "$?" == '0' ]
+if grep -q docker /etc/group
then
- # Add jenkins user to docker group
- usermod -a -G docker jenkins
+ # Add jenkins user to docker group
+ usermod -a -G docker jenkins
fi
# Check if mock group exists
-grep -q mock /etc/group
-if [ "$?" == '0' ]
+if grep -q mock /etc/group
then
- # Add jenkins user to mock group so they can build Int/Pack's RPMs
- usermod -a -G mock jenkins
+ # Add jenkins user to mock group so they can build Int/Pack's RPMs
+ usermod -a -G mock jenkins
fi
mkdir /home/jenkins/.ssh
mkdir /w
-cp -r /home/${OS}/.ssh/authorized_keys /home/jenkins/.ssh/authorized_keys
+cp -r "/home/${OS}/.ssh/authorized_keys" /home/jenkins/.ssh/authorized_keys
# Generate ssh key for use by Robot jobs
echo -e 'y\n' | ssh-keygen -N "" -f /home/jenkins/.ssh/id_rsa -t rsa
chown -R jenkins:jenkins /home/jenkins/.ssh /w
+chmod 700 /home/jenkins/.ssh
#!/bin/bash
-OS=`facter operatingsystem`
+OS=$(facter operatingsystem)
case "$OS" in
Fedora)
systemctl stop firewalld
;;
CentOS|RedHat)
- if [ `facter operatingsystemrelease | cut -d '.' -f1` -lt "7" ]; then
+ if [ "$(facter operatingsystemrelease | cut -d '.' -f1)" -lt "7" ]; then
service iptables stop
else
systemctl stop firewalld
# make sure jenkins is part of the docker only if jenkins has already been
# created
-grep -q jenkins /etc/passwd
-if [ "$?" == '0' ]
+
+if grep -q jenkins /etc/passwd
then
/usr/sbin/usermod -a -G docker jenkins
fi
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
-cd /builder/jenkins-scripts
-chmod +x *.sh
+cd /builder/jenkins-scripts || exit 1
+chmod +x -- *.sh
./system_type.sh
+# shellcheck disable=SC1091
source /tmp/system_type.sh
./basic_settings.sh
-./${SYSTEM_TYPE}.sh
+"./${SYSTEM_TYPE}.sh"
# Create the jenkins user last so that hopefully we don't have to deal with
# guard files
--- /dev/null
+---
+- project:
+ name: aaa-csit-keystone
+ jobs:
+ - '{project}-csit-1node-{functionality}-{install}-{stream}'
+ - '{project}-csit-verify-1node-{functionality}'
+
+ # The project name
+ project: 'aaa'
+
+ # The functionality under test
+ functionality: 'keystone'
+
+ # Project branches
+ stream:
+ - carbon:
+ branch: 'master'
+ jre: 'openjdk8'
+
+ install:
+ - only:
+ scope: 'only'
+ - all:
+ scope: 'all'
+
+ # Features to install
+ install-features: 'odl-restconf-all'
+
+ tools_system_count: '1'
+ tools_system_flavor: '4 GB General Purpose v1'
+ tools_system_image: 'CentOS 7 - docker - 20170120-1434'
+
+ # Robot custom options
+ robot-options: ''
- inject:
properties-file: variables.prop
-- builder:
- name: autorelease-generate-project-report
- builders:
- - shell: !include-raw: include-raw-generate-project-report.sh
-
- builder:
name: autorelease-fix-relative-paths
builders:
name: autorelease-projects
jobs:
- 'autorelease-release-{stream}'
- - 'autorelease-project-report-{stream}'
stream:
- carbon:
jdk: 'openjdk8'
integration-test: boron
- beryllium:
+ # Only run once a week since Beryllium is in maintenance mode
+ cron: 'H H * * 0'
next-release-tag: Beryllium-SR5
branch: 'stable/beryllium'
jdk: 'openjdk7'
integration-test: beryllium
project: 'releng/autorelease'
- archive-artifacts: '**/*.prop **/*.log **/patches/*.bundle **/patches/*.patch all-bundles.tar.gz'
+ archive-artifacts: >
+ **/*.prop
+ **/*.log
+ patches/**
+ patches.tar.gz
###
# TODO: Remove this job once guava21 testing is complete
project-type: freestyle
node: centos7-autorelease-4c-16g
jdk: '{jdk}'
+ cron: 'H 0 * * *'
properties:
- opendaylight-infra-properties:
build-timeout: '1440'
triggers:
- - timed: 'H 0 * * *'
+ - timed: '{cron}'
builders:
# force jenkins install of maven version before any shell scripts use it
# way for downstream jobs to pull the latest version of this file
# in their builds.
artifacts: 'dependencies.log'
- - email-notification:
- email-recipients: '{email-recipients}'
- email-prefix: '[autorelease]'
- trigger-parameterized-builds:
- project: 'integration-distribution-test-{integration-test}'
condition: UNSTABLE_OR_BETTER
fail-on-missing: true
- opendaylight-infra-shiplogs:
maven-version: 'mvn33'
-
-
-- job-template:
- name: 'autorelease-project-report-{stream}'
-
- project-type: freestyle
- node: centos7-java-builder-2c-8g
-
- properties:
- - opendaylight-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
-
- parameters:
- - opendaylight-infra-parameters:
- project: '{project}'
- branch: '{branch}'
- refspec: 'refs/heads/{branch}'
- artifacts: '{archive-artifacts}'
- - string:
- name: REPORT_DIR
- default: '$WORKSPACE/project-reports'
- description: "The directory containing project reports"
-
- scm:
- - git:
- credentials-id: 'opendaylight-jenkins-ssh'
- url: '$GIT_BASE'
- refspec: '$GERRIT_REFSPEC'
- branches:
- - '$GERRIT_BRANCH'
- choosing-strategy: 'gerrit'
- skip-tag: true
- submodule:
- recursive: true
-
- wrappers:
- - opendaylight-infra-wrappers:
- build-timeout: '30'
-
- triggers:
- - timed: '0 0 * * 0'
-
- builders:
- - shell: 'echo "DATE=`date +%Y-%m-%d`" > $WORKSPACE/variables.prop'
- - inject:
- properties-file: variables.prop
- - autorelease-generate-project-report
- - shell: "./scripts/list-project-dependencies.sh"
- - autorelease-determine-merge-order
- - autorelease-sys-stats
-
- publishers:
- - email-ext:
- attachments: 'project-reports/*.log'
- recipients: 'skitt@redhat.com thanh.ha@linuxfoundation.org'
- reply-to: dev@lists.opendaylight.org
- content-type: default
- subject: '[releng] ODL {stream} project report for ${{ENV, var="DATE"}}'
- body: |
- This is a project report generated on $DATE listing the commit
- history of ODL projects for the past week. See attached
- git-report.log
- Archive also available on Jenkins at $BUILD_URL
- always: true
- - opendaylight-infra-shiplogs:
- maven-version: 'mvn33'
NEXUS_STAGING_URL=${ODLNEXUS_STAGING_URL:-$ODLNEXUSPROXY}
NEXUSURL=${NEXUS_STAGING_URL}/content/repositories/
-VERSION=`grep -m2 '<version>' ${WORKSPACE}/integration/distribution/distribution-karaf/pom.xml | tail -n1 | awk -F'[<|>]' '/version/ { printf $3 }'`
+VERSION=$(grep -m2 '<version>' "${WORKSPACE}/integration/distribution/distribution-karaf/pom.xml" | tail -n1 | awk -F'[<|>]' '/version/ { printf $3 }')
echo "VERSION: ${VERSION}"
-STAGING_REPO_ID=`grep "Created staging repository with ID" $WORKSPACE/deploy-staged-repository.log | cut -d '"' -f2`
-BUNDLEURL=${NEXUSURL}/${STAGING_REPO_ID}/org/opendaylight/integration/distribution-karaf/${VERSION}/distribution-karaf-${VERSION}.zip
-echo STAGING_REPO_ID=$STAGING_REPO_ID >> $WORKSPACE/variables.prop
-echo BUNDLEURL=$BUNDLEURL >> $WORKSPACE/variables.prop
+STAGING_REPO_ID=$(grep "Created staging repository with ID" "$WORKSPACE/deploy-staged-repository.log" | cut -d '"' -f2)
+BUNDLEURL="${NEXUSURL}/${STAGING_REPO_ID}/org/opendaylight/integration/distribution-karaf/${VERSION}/distribution-karaf-${VERSION}.zip"
+echo STAGING_REPO_ID="$STAGING_REPO_ID" >> "$WORKSPACE/variables.prop"
+echo BUNDLEURL="$BUNDLEURL" >> "$WORKSPACE/variables.prop"
echo "BUNDLEURL: ${BUNDLEURL}"
# Copy variables.prop to variables.jenkins-trigger so that the end of build
##############################################################################
# Assuming that mvn deploy created the hide/from/pom/files/stage directory.
-cd hide/from/pom/files
+cd hide/from/pom/files || exit 1
mkdir -p m2repo/org/opendaylight/
# ODLNEXUSPROXY is used to define the location of the Nexus server used by the CI system.
# in cases where an internal ci system is using multiple NEXUS systems one for artifacts and another for staging,
# we can override using ODLNEXUS_STAGING_URL to route the staging build to the 2nd server.
# (most CI setups where a single Nexus server is used, ODLNEXUS_STAGING_URL should be left unset)
-NEXUS_STAGING_URL=${ODLNEXUS_STAGING_URL:-$ODLNEXUSPROXY}
+NEXUS_STAGING_URL=${ODLNEXUS_STAGING_URL:-$ODLNEXUSPROXY}
NEXUS_STAGING_PROFILE=${ODLNEXUS_STAGING_PROFILE:-425e43800fea70}
NEXUS_STAGING_SERVER_ID=${ODLNEXUS_STAGING_SERVER_ID:-"opendaylight.staging"}
--exclude 'resolver-status.properties' \
"stage/org/opendaylight" m2repo/org/
-"$MVN" -V -B org.sonatype.plugins:nexus-staging-maven-plugin:1.6.2:deploy-staged-repository \
- -DrepositoryDirectory="`pwd`/m2repo" \
- -DnexusUrl=$NEXUS_STAGING_URL \
+"$MVN" -V -B org.sonatype.plugins:nexus-staging-maven-plugin:1.6.8:deploy-staged-repository \
+ -DrepositoryDirectory="$(pwd)/m2repo" \
+ -DnexusUrl="$NEXUS_STAGING_URL" \
-DstagingProfileId="$NEXUS_STAGING_PROFILE" \
-DserverId="$NEXUS_STAGING_SERVER_ID" \
- -s $SETTINGS_FILE \
- -gs $GLOBAL_SETTINGS_FILE | tee $WORKSPACE/deploy-staged-repository.log
+ -s "$SETTINGS_FILE" \
+ -gs "$GLOBAL_SETTINGS_FILE" | tee "$WORKSPACE/deploy-staged-repository.log"
# get console logs
wget -O "$CONSOLE_LOG" "${BUILD_URL}consoleText"
-# extract the failing project or artifactid
+# TODO: This section is still required since some of the projects use
+# description. Remove this section when the reactor info is more consistent.
+# extract failing project from reactor information
REACTOR_INFO=$(awk '/Reactor Summary:/ { flag=1 }
flag {
if ( sub(/^\[(INFO)\]/,"") && sub(/FAILURE \[.*/,"") ) {
# check for project format
if [[ ${REACTOR_INFO} =~ .*::*.*::*. ]]; then
- # extract project and artifactid from full format
+ # extract project and artifactId from full format
ODL=$(echo "${REACTOR_INFO}" | awk -F'::' '{ gsub(/^[ \t]+|[ \t]+$/, "", $1); print $1 }')
- PROJECT=$(echo "${REACTOR_INFO}" | awk -F'::' '{ gsub(/^[ \t]+|[ \t]+$/, "", $2); print $2 }')
- ARTIFACTID=$(echo "${REACTOR_INFO}" | awk -F'::' '{ gsub(/^[ \t]+|[ \t]+$/, "", $3); print $3 }')
+ PROJECT_=$(echo "${REACTOR_INFO}" | awk -F'::' '{ gsub(/^[ \t]+|[ \t]+$/, "", $2); print $2 }')
+ NAME=$(echo "${REACTOR_INFO}" | awk -F'::' '{ gsub(/^[ \t]+|[ \t]+$/, "", $3); print $3 }')
else
# set ARTIFACTID to partial format
ODL=""
- PROJECT=""
- ARTIFACTID=$(echo "${REACTOR_INFO}" | awk '{ gsub(/^[ \t]+|[ \t]+$/, ""); print }')
+ PROJECT_=""
+ NAME=$(echo "${REACTOR_INFO}" | awk '{ gsub(/^[ \t]+|[ \t]+$/, ""); print }')
+fi
+
+
+# determine ARTIFACT_ID for project mailing list
+ARTIFACT_ID=$(awk -F: '/\[ERROR\].*mvn <goals> -rf :/ { print $2}' $CONSOLE_LOG)
+
+# determine project mailing list using xpaths
+# if project.groupId:
+# project.groupId is set and is not inherited
+# else if project.parent.groupId:
+# project.groupId is not set but IS inherited from project.parent.groupId
+# else
+# exclude project mailing list
+grouplist=()
+while IFS="" read -r p; do
+ GROUP=$(xmlstarlet sel\
+ -N "x=http://maven.apache.org/POM/4.0.0"\
+ -t -m "/x:project[x:artifactId='$ARTIFACT_ID']"\
+ --if "/x:project/x:groupId"\
+ -v "/x:project/x:groupId"\
+ --elif "/x:project/x:parent/x:groupId"\
+ -v "/x:project/x:parent/x:groupId"\
+ --else -o ""\
+ "$p" 2>/dev/null)
+ if [ ! -z "${GROUP}" ]; then
+ grouplist+=($(echo "${GROUP}" | awk -F'.' '{ print $3 }'))
+ fi
+done < <(find . -name "pom.xml")
+
+if [ "${#grouplist[@]}" -eq 1 ]; then
+ PROJECT="${grouplist[0]}"
+else
+ GROUPLIST="NOTE: The artifactId: $ARTIFACT_ID matches multiple groups: ${grouplist[*]}"
fi
# Construct email subject & body
PROJECT_STRING=${PROJECT:+" from $PROJECT"}
-SUBJECT="[release] Autorelease $STREAM failed to build $ARTIFACTID$PROJECT_STRING"
-BODY="Attention$PROJECT_STRING,
+SUBJECT="[release] Autorelease $STREAM failed to build $ARTIFACT_ID$PROJECT_STRING"
+BODY="Attention ${PROJECT:-"OpenDaylight"}-devs,
-Autorelease $STREAM failed to build $ARTIFACTID$PROJECT_STRING in build
+Autorelease $STREAM failed to build $ARTIFACT_ID$PROJECT_STRING in build
$BUILD_NUMBER. Attached is a snippet of the error message related to the
failure that we were able to automatically parse as well as console logs.
+${PROJECT:+"$GROUPLIST"}
Console Logs:
https://logs.opendaylight.org/$SILO/$ARCHIVES_DIR
BUILD_STATUS=$(awk '/\[INFO\] Remote staging finished/{flag=1;next}/Total time:/{flag=0}flag' $CONSOLE_LOG \
| grep '\] BUILD' | awk '{print $3}')
-if [ ! -z "${ARTIFACTID}" ] && [[ "${BUILD_STATUS}" != "SUCCESS" ]]; then
+if ([ ! -z "${NAME}" ] || [ ! -z "${ARTIFACT_ID}" ]) && [[ "${BUILD_STATUS}" != "SUCCESS" ]]; then
# project search pattern should handle both scenarios
- # 1. Full format: ODL :: $PROJECT :: $ARTIFACTID
- # 2. Partial format: Building $ARTIFACTID
- sed -e "/\[INFO\] Building \(${ARTIFACTID} \|${ODL} :: ${PROJECT} :: ${ARTIFACTID} \)/,/Reactor Summary:/!d;//d" \
- $CONSOLE_LOG > /tmp/error_msg
+ # 1. Full format: ODL :: $PROJECT :: $ARTIFACT_ID
+ # 2. Partial format: Building $ARTIFACT_ID
+ sed -e "/\[INFO\] Building \(${NAME} \|${ARTIFACT_ID} \|${ODL} :: ${PROJECT_} :: ${NAME} \)/,/Reactor Summary:/!d;//d" \
+ $CONSOLE_LOG > /tmp/error.txt
if [ -n "${PROJECT}" ]; then
- RELEASE_EMAIL="${RELEASE_EMAIL}, ${PROJECT}-dev@opendaylight.org"
+ RELEASE_EMAIL="${RELEASE_EMAIL}, ${PROJECT}-dev@lists.opendaylight.org"
fi
- echo "${BODY}" | mail -a /tmp/error_msg \
- -S "from=Jenkins <jenkins-dontreply@opendaylight.org>" \
- -s "${SUBJECT}" "${RELEASE_EMAIL}"
+ # Only send emails in production (releng), not testing (sandbox)
+ if [ "${SILO}" == "releng" ]; then
+ echo "${BODY}" | mail -a /tmp/error.txt \
+ -r "Jenkins <jenkins-dontreply@opendaylight.org>" \
+ -s "${SUBJECT}" "${RELEASE_EMAIL}"
+ elif [ "${SILO}" == "sandbox" ]; then
+ echo "Running in sandbox, not actually sending notification emails"
+ echo "Subject: ${SUBJECT}"
+ echo "Body: ${BODY}"
+ else
+ echo "Not sure how to notify in \"${SILO}\" Jenkins silo"
+ fi
fi
rm $CONSOLE_LOG
#!/bin/bash
# @License EPL-1.0 <http://spdx.org/licenses/EPL-1.0>
##############################################################################
-# Copyright (c) 2015 The Linux Foundation and others.
+# Copyright (c) 2015, 2017 The Linux Foundation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# RELEASE_TAG=Beryllium-SR1 # Example
# RELEASE_BRANCH=stable/beryllium # Example
+LFTOOLS_DIR="$WORKSPACE/.venv-lftools"
+if [ ! -d "$LFTOOLS_DIR" ]
+then
+ virtualenv "$LFTOOLS_DIR"
+ # shellcheck disable=SC1090
+ source "$LFTOOLS_DIR/bin/activate"
+ pip install --upgrade pip
+ pip freeze
+ pip install lftools
+fi
+# shellcheck disable=SC1090
+source "$LFTOOLS_DIR/bin/activate"
+
# Directory to put git format-patches
-PATCH_DIR=`pwd`/patches
+PATCH_DIR="$WORKSPACE/patches"
-echo $RELEASE_TAG
-./scripts/version.sh release $RELEASE_TAG
+echo "$RELEASE_TAG"
+lftools version release "$RELEASE_TAG"
git submodule foreach "git commit -am \"Release $RELEASE_TAG\" || true"
git commit -am "Release $RELEASE_TAG"
mkdir patches
-mv taglist.log $PATCH_DIR
-modules=`xmlstarlet sel -N x=http://maven.apache.org/POM/4.0.0 -t -m '//x:modules' -v '//x:module' pom.xml`
+mv taglist.log "$PATCH_DIR"
+modules=$(xmlstarlet sel -N x=http://maven.apache.org/POM/4.0.0 -t -m '//x:modules' -v '//x:module' pom.xml)
for module in $modules; do
- pushd $module
- git format-patch --stdout origin/$RELEASE_BRANCH > $PATCH_DIR/${module//\//-}.patch
- git bundle create $PATCH_DIR/${module//\//-}.bundle "origin/master..HEAD"
+ pushd "$module"
+ git format-patch --stdout "origin/$RELEASE_BRANCH" > "$PATCH_DIR/${module//\//-}.patch"
+ git bundle create "$PATCH_DIR/${module//\//-}.bundle" "origin/master..HEAD"
popd
done
-tar cvzf all-bundles.tar.gz `find $PATCH_DIR -type f -print0 \
- | xargs -0r file \
- | egrep -e ':.*Git bundle.*' \
- | cut -d: -f1`
-rm $PATCH_DIR/*.bundle
-
+tar cvzf patches.tar.gz -C "$WORKSPACE" patches
+rm "$PATCH_DIR"/*.bundle
+++ /dev/null
-#!/bin/bash
-# @License EPL-1.0 <http://spdx.org/licenses/EPL-1.0>
-##############################################################################
-# Copyright (c) 2015 The Linux Foundation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Eclipse Public License v1.0
-# which accompanies this distribution, and is available at
-# http://www.eclipse.org/legal/epl-v10.html
-##############################################################################
-
-GIT_REPORT_FILE=$REPORT_DIR/git-report.log
-
-mkdir $REPORT_DIR
-touch $GIT_REPORT_FILE
-
-projects=`grep path .gitmodules | sed 's/.*= //' | sort`
-for p in $projects; do
- echo "" >> $GIT_REPORT_FILE
- echo "========" >> $GIT_REPORT_FILE
- echo "$p" >> $GIT_REPORT_FILE
- echo "========" >> $GIT_REPORT_FILE
- echo "" >> $GIT_REPORT_FILE
-
- cd $WORKSPACE/$p
- git log --after="1 week ago" | sed 'sX^ Change-Id: X -> https://git.opendaylight.org/gerrit/r/X' >> $GIT_REPORT_FILE
- cd $WORKSPACE
-done
odl-restconf,
odl-bgpcep-pcep,
odl-bgpcep-bgp,
- odl-bgpcep-bmp,odl-jolokia
+ odl-bgpcep-bmp,
+ odl-jolokia
# Robot custom options
robot-options: '-v USE_NETCONF_CONNECTOR:False'
tools_system_count: 0
# Features to install
- install-features: 'odl-restconf,odl-clustering-test-app,odl-jolokia'
+ install-features: >
+ odl-restconf,
+ odl-clustering-test-app,
+ odl-jolokia
# Robot custom options
robot-options: ''
tools_system_count: 0
# Features to install
- install-features: 'odl-restconf,odl-clustering-test-app'
+ install-features: >
+ odl-restconf,
+ odl-clustering-test-app
# Robot custom options
robot-options: ''
--- /dev/null
+---
+- project:
+ name: mdsal-csit-dom-notification-broker-no-loss-longevity
+ jobs:
+ - '{project}-csit-1node-{functionality}-{install}-{stream}'
+
+ # The project name
+ project: 'controller'
+
+ # The functionality under test
+ functionality: 'notifications-longevity'
+
+ # Project branches
+ stream:
+ - carbon:
+ branch: 'master'
+ jre: 'openjdk8'
+
+ install:
+ - only:
+ scope: 'only'
+
+ # Features to install
+ install-features: >
+ odl-restconf,
+ odl-clustering-test-app
+
+  # Build timeout
+ build-timeout: '1500'
scope: 'all'
# Features to install
- install-features: 'odl-restconf,odl-mdsal-benchmark,odl-jolokia'
+ install-features: >
+ odl-restconf,
+ odl-mdsal-benchmark
# Robot custom options
robot-options: '--exclude singlenode_setup'
scope: 'all'
# Features to install
- install-features: 'odl-restconf,odl-mdsal-benchmark,odl-jolokia'
+ install-features: >
+ odl-restconf,
+ odl-mdsal-benchmark,
+ odl-jolokia
# Robot custom options
robot-options: '--exclude clustered_setup'
scope: 'all'
# Features to install
- install-features: 'odl-restconf,odl-clustering-test-app'
+ install-features: >
+ odl-restconf,
+ odl-clustering-test-app
# Robot custom options
robot-options: ''
scope: 'only'
# Features to install
- install-features: 'odl-restconf,odl-clustering-test-app'
+ install-features: >
+ odl-restconf,
+ odl-clustering-test-app
# Robot custom options
robot-options: ''
#!/bin/bash
-if [ $GERRIT_BRANCH == "master" ]; then
+if [ "$GERRIT_BRANCH" == "master" ]; then
RTD_BUILD_VERSION=latest
else
- RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}}
+ RTD_BUILD_VERSION="${{GERRIT_BRANCH/\//-}}"
fi
+
+# shellcheck disable=SC1083
curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/{rtdproject}
- '{project-name}-integration-{stream}'
- '{project-name}-merge-{stream}'
- '{project-name}-verify-{stream}-{maven}-{jdks}'
+ - '{project-name}-distribution-check-{stream}'
+ - '{project-name}-validate-autorelease-{stream}'
project: 'federation'
project-name: 'federation'
- mitaka:
openstack-branch: 'stable/mitaka'
odl-ml2-branch: 'stable/mitaka'
- odl-ml2-driver-version: 'v1'
- liberty:
openstack-branch: 'stable/liberty'
odl-ml2-branch: 'stable/liberty'
- odl-ml2-driver-version: 'v1'
schedule: 'H H * * *'
- odl-enable-l3: 'yes'
-
public-bridge: 'br-int'
- public-physical-network: 'physnet1'
-
- enable-networking-l2gw: 'no'
-
- disable-odl-l3-service-plugin: 'no'
-
- enable-openstack-services: 'q-svc,q-dhcp,q-meta,n-cauth,tempest'
-
- disable-openstack-services: 'swift,cinder,n-net,q-vpn,n-cpu'
-
- tenant-network-type: 'vxlan'
+ enable-openstack-plugins: 'networking-odl'
security-group-mode: 'none'
-
- robot-options: ''
##############################################################################
# Clear workspace
-rm -rf *
+rm -rf -- "${WORKSPACE:?}"/*
# Create python script to parse json
-cat > ${WORKSPACE}/parse_json.py << EOF
+cat > "${WORKSPACE}/parse_json.py" << EOF
import json
import sys
# Clone all ODL projects
curl -s --header "Accept: application/json" \
https://git.opendaylight.org/gerrit/projects/ | \
- tail -n +2 > ${WORKSPACE}/projects.json
-for p in `cat ${WORKSPACE}/projects.json | python ${WORKSPACE}/parse_json.py`
+ tail -n +2 > "${WORKSPACE}/projects.json"
+for p in $(python "${WORKSPACE}/parse_json.py" < "${WORKSPACE}/projects.json")
do
# Ignore non-projects and archived projects
if [ "$p" == "All-Users" ] || \
then
continue
fi
- mkdir -p `dirname "$p"`
+ mkdir -p "$(dirname "$p")"
git clone "https://git.opendaylight.org/gerrit/$p.git" "$p"
done
# Check pom.xml for <repositories> and <pluginRepositories>
FILE=repos.txt
-find . -name pom.xml | xargs grep -i '<repositories>\|<pluginRepositories>' > $FILE
+find . -name pom.xml -print0 | xargs -0 grep -i '<repositories>\|<pluginRepositories>' > "$FILE"
[[ $(tr -d "\r\n" < $FILE|wc -c) -eq 0 ]] && rm $FILE
if [ -a $FILE ]
# Print out git status at the end of the build before we archive if $WORKSPACE
# is a git repo.
-if [ -d $WORKSPACE/.git ]; then
+if [ -d "$WORKSPACE/.git" ]; then
echo ""
echo "----------> Git Status Report"
git status
echo "Build logs: <a href=\"$LOGS_SERVER/$SILO/$ARCHIVES_DIR\">$LOGS_SERVER/$SILO/$ARCHIVES_DIR</a>"
mkdir .archives
-cd .archives/
+cd .archives/ || exit 1
cat > deploy-archives.xml <<EOF
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
</project>
EOF
-mkdir -p $ARCHIVES_DIR
-mkdir -p $WORKSPACE/archives
-if [ ! -z "${{ARCHIVE_ARTIFACTS}}" ]; then
- pushd $WORKSPACE
+mkdir -p "$ARCHIVES_DIR"
+mkdir -p "$WORKSPACE/archives"
+if [ ! -z "$ARCHIVE_ARTIFACTS" ]; then
+ pushd "$WORKSPACE"
shopt -s globstar # Enable globstar to copy archives
- archive_artifacts=$(echo ${{ARCHIVE_ARTIFACTS}})
- for f in $archive_artifacts; do
+ for f in $ARCHIVE_ARTIFACTS; do
+ [[ -e $f ]] || continue # handle the case of no files to archive
echo "Archiving $f"
- mkdir -p $WORKSPACE/archives/$(dirname $f)
- mv $f $WORKSPACE/archives/$f
+ dir=$(dirname "$f")
+ mkdir -p "$WORKSPACE/archives/$dir"
+ mv "$f" "$WORKSPACE/archives/$f"
done
shopt -u globstar # Disable globstar once archives are copied
popd
# Ignore logging if archives doesn't exist
-mv $WORKSPACE/archives/ $ARCHIVES_DIR > /dev/null 2>&1
-touch $ARCHIVES_DIR/_build-details.txt
-echo "build-url: ${{BUILD_URL}}" >> $ARCHIVES_DIR/_build-details.txt
-env | grep -v PASSWORD > $ARCHIVES_DIR/_build-enviroment-variables.txt
+mv "$WORKSPACE/archives/" "$ARCHIVES_DIR" > /dev/null 2>&1
+touch "$ARCHIVES_DIR/_build-details.txt"
+echo "build-url: ${BUILD_URL}" >> "$ARCHIVES_DIR/_build-details.txt"
+env | grep -v PASSWORD > "$ARCHIVES_DIR/_build-enviroment-variables.txt"
# capture system info
-touch $ARCHIVES_DIR/_sys-info.txt
-{{
- echo -e "uname -a:\n `uname -a` \n"
- echo -e "df -h:\n `df -h` \n"
- echo -e "free -m:\n `free -m` \n"
- echo -e "nproc:\n `nproc` \n"
- echo -e "lscpu:\n `lscpu` \n"
- echo -e "ip addr:\n `/sbin/ip addr` \n"
-}} 2>&1 | tee -a $ARCHIVES_DIR/_sys-info.txt
+touch "$ARCHIVES_DIR/_sys-info.txt"
+{
+ echo -e "uname -a:\n $(uname -a) \n"
+ echo -e "df -h:\n $(df -h) \n"
+ echo -e "free -m:\n $(free -m) \n"
+ echo -e "nproc:\n $(nproc) \n"
+ echo -e "lscpu:\n $(lscpu) \n"
+ echo -e "ip addr:\n $(/sbin/ip addr) \n"
+} 2>&1 | tee -a "$ARCHIVES_DIR/_sys-info.txt"
# Magic string used to trim console logs at the appropriate level during wget
echo "-----END_OF_BUILD-----"
-wget -O $ARCHIVES_DIR/console.log ${{BUILD_URL}}consoleText
-wget -O $ARCHIVES_DIR/console-timestamp.log "${{BUILD_URL}}/timestamps?time=HH:mm:ss&appendLog"
-sed -i '/^-----END_OF_BUILD-----$/,$d' $ARCHIVES_DIR/console.log
-sed -i '/^.*-----END_OF_BUILD-----$/,$d' $ARCHIVES_DIR/console-timestamp.log
+wget -O "$ARCHIVES_DIR/console.log" "${BUILD_URL}consoleText"
+wget -O "$ARCHIVES_DIR/console-timestamp.log" "$BUILD_URL/timestamps?time=HH:mm:ss&appendLog"
+sed -i '/^-----END_OF_BUILD-----$/,$d' "$ARCHIVES_DIR/console.log"
+sed -i '/^.*-----END_OF_BUILD-----$/,$d' "$ARCHIVES_DIR/console-timestamp.log"
-gzip $ARCHIVES_DIR/*.txt $ARCHIVES_DIR/*.log
+gzip "$ARCHIVES_DIR"/*.txt "$ARCHIVES_DIR"/*.log
# find and gzip any 'text' files
-find $ARCHIVES_DIR -type f -print0 \
+find "$ARCHIVES_DIR" -type f -print0 \
| xargs -0r file \
| egrep -e ':.*text.*' \
| cut -d: -f1 \
| xargs -d'\n' -r gzip
+# Compress Java heap dumps using xz
+find "$ARCHIVES_DIR" -type f -name \*.hprof -print0 | xargs -0 xz
-zip -r archives.zip $JENKINS_HOSTNAME/ > $ARCHIVES_DIR/_archives-zip.log
+zip -r archives.zip "$JENKINS_HOSTNAME/" > "$ARCHIVES_DIR/_archives-zip.log"
du -sh archives.zip
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
-if [[ $P2ZIP_URL == "" ]]; then
+if [[ "$P2ZIP_URL" == "" ]]; then
P2ZIP_URL=opendaylight.snapshot/$(find . -name "*.zip" -type f -exec ls "{}" + | head -1)
- FILE_NAME=`echo $P2ZIP_URL | awk -F'/' '{ print $NF }'`
+ FILE_NAME=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $NF }')
RELEASE_PATH="snapshot"
else
- FILE_NAME=`echo $P2ZIP_URL | awk -F'/' '{ print $NF }'`
- VERSION=`echo $P2ZIP_URL | awk -F'/' '{ print $(NF-1) }'`
+ FILE_NAME=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $NF }')
+ VERSION=$(echo "$P2ZIP_URL" | awk -F'/' '{ print $(NF-1) }')
RELEASE_PATH="release/$VERSION"
- wget --quiet $P2ZIP_URL -O $FILE_NAME
+ wget --quiet "$P2ZIP_URL" -O "$FILE_NAME"
fi
# If we detect a snapshot build then release to a snapshot repo
# YangIDE has indicated that they only want the latest snapshot released to
# the snapshot directory.
-if echo $P2ZIP_URL | grep opendaylight.snapshot; then
+if echo "$P2ZIP_URL" | grep opendaylight.snapshot; then
RELEASE_PATH="snapshot"
fi
-cat > ${WORKSPACE}/pom.xml <<EOF
+cat > "${WORKSPACE}/pom.xml" <<EOF
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.$PROJECT</groupId>
#!/bin/bash
if [ -d "$WORKSPACE/.venv-openstack" ]; then
- source $WORKSPACE/.venv-openstack/bin/activate
- OS_STATUS=`openstack --os-cloud rackspace stack show -f json -c stack_status $STACK_NAME | jq -r '.stack_status'`
+ # shellcheck disable=SC1090
+ source "$WORKSPACE/.venv-openstack/bin/activate"
+ OS_STATUS=$(openstack --os-cloud rackspace stack show -f json -c stack_status "$STACK_NAME" | jq -r '.stack_status')
if [ "$OS_STATUS" == "CREATE_COMPLETE" ] || [ "$OS_STATUS" == "CREATE_FAILED" ]; then
echo "Deleting $STACK_NAME"
- openstack --os-cloud rackspace stack delete --yes $STACK_NAME
+ openstack --os-cloud rackspace stack delete --yes "$STACK_NAME"
fi
fi
##############################################################################
# Assuming that mvn deploy created the hide/from/pom/files/stage directory.
-cd hide/from/pom/files
+cd hide/from/pom/files || exit 1
mkdir -p m2repo/org/opendaylight/
rsync -avz --exclude 'maven-metadata*' \
--exclude '_remote.repositories' \
--exclude 'resolver-status.properties' \
- "stage/org/opendaylight/$m" m2repo/org/opendaylight/
+ "stage/org/opendaylight/$PROJECT" m2repo/org/opendaylight/
mvn org.sonatype.plugins:nexus-staging-maven-plugin:1.6.2:deploy-staged-repository \
- -DrepositoryDirectory="`pwd`/m2repo" \
+ -DrepositoryDirectory="$(pwd)/m2repo" \
-DnexusUrl=https://nexus.opendaylight.org/ \
-DstagingProfileId="$STAGING_PROFILE_ID" \
-DserverId="opendaylight-staging" \
- -s $SETTINGS_FILE \
- -gs $GLOBAL_SETTINGS_FILE | tee $WORKSPACE/deploy-staged-repository.log
+ -s "$SETTINGS_FILE" \
+ -gs "$GLOBAL_SETTINGS_FILE" | tee "$WORKSPACE/deploy-staged-repository.log"
# If we detect a snapshot build then there is no need to run this script.
# YangIDE has indicated that they only want the latest snapshot released to
# the snapshot directory.
-if echo $P2ZIP_URL | grep opendaylight.snapshot; then
+if echo "$P2ZIP_URL" | grep opendaylight.snapshot; then
exit 0
fi
if [[ "$P2ZIP_URL" == "" ]]; then
exit 0
fi
-EPOCH_DATE=`date +%s%3N`
-MVN_METADATA=`echo $P2ZIP_URL | sed 's,/*[^/]\+/*$,,' | sed 's,/*[^/]\+/*$,,'`/maven-metadata.xml
+EPOCH_DATE=$(date +%s%3N)
+MVN_METADATA=$(echo "$P2ZIP_URL" | sed 's,/*[^/]\+/*$,,' | sed 's,/*[^/]\+/*$,,')/maven-metadata.xml
P2_COMPOSITE_ARTIFACTS=compositeArtifacts.xml
P2_COMPOSITE_CONTENT=compositeContent.xml
-wget $MVN_METADATA -O maven-metadata.xml
+wget "$MVN_METADATA" -O maven-metadata.xml
-VERSIONS=`xmlstarlet sel -t -m "/metadata/versioning/versions" -v "version" maven-metadata.xml`
-NUM_VERSIONS=`echo $VERSIONS | wc -w`
+VERSIONS=$(xmlstarlet sel -t -m "/metadata/versioning/versions" -v "version" maven-metadata.xml)
+NUM_VERSIONS=$(echo "$VERSIONS" | wc -w)
##
#!/bin/bash
-git log --show-signature -1 | egrep -q 'gpg: Signature made.*key ID'
-if [ $? -eq 0 ]; then
+if git log --show-signature -1 | egrep -q 'gpg: Signature made.*key ID'; then
echo "git commit is gpg signed"
else
echo "WARNING: gpg signature missing for the commit"
#!/bin/bash -x
set +e # To avoid failures in projects which generate zero snapshot artifacts.
-find /tmp/r/org/opendaylight/$GERRIT_PROJECT/ -path *-SNAPSHOT* -delete
+find "/tmp/r/org/opendaylight/$GERRIT_PROJECT/" -path "*-SNAPSHOT*" -delete
find /tmp/r/ -regex '.*/_remote.repositories\|.*/maven-metadata-local\.xml\|.*/maven-metadata-fake-nexus\.xml\|.*/resolver-status\.properties' -delete
find /tmp/r/ -type d -empty -delete
-echo "# INFO: A listing of project related files left in local repository follows."
-find /tmp/r/org/opendaylight/$GERRIT_PROJECT/
+echo "INFO: A listing of project related files left in local repository follows."
+find "/tmp/r/org/opendaylight/$GERRIT_PROJECT/"
true # To prevent the possibly non-zero return code from failing the job.
--- /dev/null
+---
+- job-template:
+ # Template: distribution-check-{stream}
+ # Goal: Build a patch and make sure it would not break distribution-check jobs for other projects.
+ # Operation: FIXME
+ # This job template builds a patch, creates a distribution containing
+ # the patch (making sure dependencies are specified),
+ # and performs the distribution deploy test.
+
+ name: 'distribution-check-{stream}'
+ disabled: false
+
+ project-type: freestyle
+ node: '{build-node}'
+ concurrent: true
+ jdk: '{jdk}'
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts} **/dependency_tree.txt **/target/surefire-reports/*-output.txt'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'distribution'
+ refspec: '$GERRIT_REFSPEC'
+ branch: '{branch}'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ # Distro-check jobs typically run within 10 - 30 minutes
+ # with 45 minutes being the occasional edge case.
+ # enforce a 60 minute limit to ensure stuck jobs get
+ # cleared up sooner.
+ build-timeout: '60'
+
+ triggers:
+ - gerrit-trigger-patch-submitted:
+ server: '{server-name}'
+ project: '{project}'
+ branch: '{branch}'
+ files: '**'
+
+ builders:
+ - distribution-check-wipe
+ - distribution-check-build-project:
+ pom: 'distribution/pom.xml'
+ - distribution-check-verify-groupid:
+ gerrit-project: 'integration'
+ - distribution-check-delete-snapshots
+ - distribution-check-configure-remotes
+ - distribution-check-repeat-build:
+ dist-pom: 'distribution/pom.xml'
+ - integration-distribution-check
+
+ publishers:
+ - email-notification:
+ email-recipients: '{email-recipients}'
+ email-prefix: '[{project-name}]'
+ - postbuildscript:
+ builders:
+ - shell: |
+ #!/bin/bash
+ mkdir -p $WORKSPACE/archives
+ cp karaf*.log $WORKSPACE/archives
+ script-only-if-succeeded: false
+ script-only-if-failed: false
+ mark-unstable-if-failed: true
+ - archive:
+ artifacts: '*.zip'
+ - opendaylight-infra-shiplogs:
+ maven-version: 'mvn33'
- '{project-name}-clm-{stream}'
- '{project-name}-sonar'
- '{project-name}-validate-autorelease-{stream}'
+ - distribution-check-{stream}
stream:
- carbon:
+#!/bin/bash
echo "Cleaning up Robot installation..."
# ${ROBOT_VENV} comes from the include-raw-integration-install-robotframework.sh
# script.
# TODO: Is this still needed when we have integration-cleanup-workspace?
-rm -rf ${ROBOT_VENV}
+rm -rf "${ROBOT_VENV}"
+#!/bin/bash
echo "Cleaning up the workspace..."
# Leftover files from previous runs could be wrongly copied as results.
# Keep the cloned integration/test repository!
-for file_or_dir in `ls -A -1 -I "test"`
+for file_or_dir in *
# FIXME: Make this compatible with multipatch and other possible build&run jobs.
do
- rm -vrf "$file_or_dir"
+ if [ "$file_or_dir" != "test" ]; then
+ rm -vrf "$file_or_dir"
+ fi
done
echo "Configuring the startup features..."
FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
-sed -ie "s/featuresBoot=.*/featuresBoot=config,standard,region,package,kar,ssh,management,${ACTUALFEATURES}/g" \${FEATURESCONF}
+sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" \${FEATURESCONF}
sed -ie "s%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features,mvn:org.opendaylight.integration/features-integration-test/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features%g" \${FEATURESCONF}
cat \${FEATURESCONF}
echo "Configuring the startup features..."
FEATURESCONF=/tmp/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
CUSTOMPROP=/tmp/${BUNDLEFOLDER}/etc/custom.properties
-sed -ie "s/featuresBoot=.*/featuresBoot=config,standard,region,package,kar,ssh,management,${ACTUALFEATURES}/g" \${FEATURESCONF}
+sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" \${FEATURESCONF}
sed -ie "s%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features,mvn:org.opendaylight.integration/features-integration-test/${BUNDLEVERSION}/xml/features,mvn:org.apache.karaf.decanter/apache-karaf-decanter/1.0.0/xml/features%g" \${FEATURESCONF}
cat \${FEATURESCONF}
echo "Configuring the startup features..."
FEATURESCONF=${WORKSPACE}/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
-sed -ie "s/featuresBoot=.*/featuresBoot=config,standard,region,package,kar,ssh,management,${ACTUALFEATURES}/g" ${FEATURESCONF}
+sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
sed -ie "s%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features,mvn:org.opendaylight.integration/features-integration-test/${BUNDLEVERSION}/xml/features%g" ${FEATURESCONF}
cat ${FEATURESCONF}
echo "Listing all open ports on controller system"
netstat -pnatu
-echo "redirected karaf console output to karaf_console.log"
-export KARAF_REDIRECT=${WORKSPACE}/${BUNDLEFOLDER}/data/log/karaf_console.log
-
if [ ${JDKVERSION} == 'openjdk8' ]; then
echo "Setting the JRE Version to 8"
# dynamic_verify does not allow sudo, JAVA_HOME should be enough for karaf start.
enable_service ${service_name}
EOF
done
+for plugin_name in ${ENABLE_OS_PLUGINS}
+do
+if [ "$plugin_name" == "networking-odl" ]; then
+ ENABLE_PLUGIN_ARGS="${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}"
+elif [ "$plugin_name" == "kuryr-kubernetes" ]; then
+ ENABLE_PLUGIN_ARGS="${DEVSTACK_KUBERNETES_PLUGIN_REPO} master" # note: kuryr-kubernetes only exists in master at the moment
+elif [ "$plugin_name" == "neutron-lbaas" ]; then
+ ENABLE_PLUGIN_ARGS="${DEVSTACK_LBAAS_PLUGIN_REPO} ${OPENSTACK_BRANCH}"
+else
+ echo "Error: Invalid plugin $plugin_name, unsupported"
+ continue
+fi
+cat >> ${local_conf_file_name} << EOF
+enable_plugin ${plugin_name} ${ENABLE_PLUGIN_ARGS}
+EOF
+done
unset IFS
-
+if [ "${OPENSTACK_BRANCH}" == "master" ] || [ "${OPENSTACK_BRANCH}" == "stable/ocata" ]; then # Ocata+
+ # placement is mandatory for nova since Ocata, note that this requires computes to enable placement-client
+ # this should be moved into enabled_services for each job (but only for Ocata)
+ echo "enable_service placement-api" >> ${local_conf_file_name}
+fi
cat >> ${local_conf_file_name} << EOF
HOST_IP=$OPENSTACK_CONTROL_NODE_IP
SERVICE_HOST=\$HOST_IP
-NEUTRON_CREATE_INITIAL_NETWORKS=False
+NEUTRON_CREATE_INITIAL_NETWORKS=${CREATE_INITIAL_NETWORKS}
Q_PLUGIN=ml2
Q_ML2_TENANT_NETWORK_TYPE=${TENANT_NETWORK_TYPE}
Q_OVS_USE_VETH=True
SERVICE_PASSWORD=admin
ADMIN_PASSWORD=admin
-enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
-
ODL_PORT=8080
ODL_MODE=externalodl
+ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
+
LIBVIRT_TYPE=qemu
+NEUTRON_LBAAS_SERVICE_PROVIDERV2=${LBAAS_SERVICE_PROVIDER} # Only relevant if neutron-lbaas plugin is enabled
EOF
if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
if [ "${ODL_ENABLE_L3_FWD}" == "yes" ]; then
cat >> ${local_conf_file_name} << EOF
PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
-PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
-ML2_VLAN_RANGES=physnet1
+PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
+ML2_VLAN_RANGES=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
disable_service q-l3
minimize_polling=True
[ml2]
-# Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-physnet1)
-# MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phynset1(1500)
-physical_network_mtus = physnet1:1440
+# Needed for VLAN provider tests - because our provider networks are always encapsulated in VXLAN (br-phys1)
+# MTU(1440) + VXLAN(50) + VLAN(4) = 1494 < MTU eth0/br-phys1(1500)
+physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1440
+path_mtu = 1490
[[post-config|/etc/neutron/dhcp_agent.ini]]
[DEFAULT]
else
RECLONE=yes
fi
+if [ "${OPENSTACK_BRANCH}" == "master" ] || [ "${OPENSTACK_BRANCH}" == "stable/ocata" ]; then # Ocata+
+ # placement is mandatory for nova since Ocata, note that this requires controller to enable placement-api
+ ENABLED_SERVICES="n-cpu,placement-client"
+else
+ ENABLED_SERVICES="n-cpu"
+fi
+
local_conf_file_name=${WORKSPACE}/local.conf_compute_${HOSTIP}
cat > ${local_conf_file_name} << EOF
[[local|localrc]]
NOVA_VNC_ENABLED=True
MULTI_HOST=1
-ENABLED_SERVICES=n-cpu
-
+ENABLED_SERVICES=${ENABLED_SERVICES}
HOST_IP=${HOSTIP}
SERVICE_HOST=${OPENSTACK_CONTROL_NODE_IP}
SERVICE_PASSWORD=admin
ADMIN_PASSWORD=admin
-enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
ODL_MODE=compute
+ODL_PORT_BINDING_CONTROLLER=${ODL_ML2_PORT_BINDING}
LIBVIRT_TYPE=qemu
EOF
+if [[ "${ENABLE_OS_PLUGINS}" =~ networking-odl ]]; then
+cat >> ${local_conf_file_name} << EOF
+enable_plugin networking-odl ${ODL_ML2_DRIVER_REPO} ${ODL_ML2_BRANCH}
+EOF
+fi
+
if [ "${NUM_ODL_SYSTEM}" -gt 1 ]; then
odl_list=${ODL_SYSTEM_1_IP}
for i in `seq 2 ${NUM_ODL_SYSTEM}`
ODL_L3=${ODL_L3}
PUBLIC_INTERFACE=br100 # FIXME do we use br100 at all?
PUBLIC_BRIDGE=${PUBLIC_BRIDGE}
-PUBLIC_PHYSICAL_NETWORK=physnet1 # FIXME this should be a parameter
+PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK}
ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS}
EOF
fi
scp "${!CONTROLLERIP}:/tmp/odl${i}_karaf.log.tar" "${WORKSPACE}/odl${i}_karaf.log.tar"
tar -xvf ${WORKSPACE}/odl${i}_karaf.log.tar -C . --strip-components 2 --transform s/karaf/odl${i}_karaf/g
grep "ROBOT MESSAGE\| ERROR " odl${i}_karaf.log > odl${i}_err.log
- grep "ROBOT MESSAGE\| Exception " odl${i}_karaf.log > odl${i}_exception.log
+ grep "ROBOT MESSAGE\|Exception" odl${i}_karaf.log > odl${i}_exception.log
grep "ROBOT MESSAGE\| ERROR \| WARN \|Exception" odl${i}_karaf.log > odl${i}_err_warn_exception.log
rm ${WORKSPACE}/odl${i}_karaf.log.tar
done
mkdir -p ${OS_CTRL_FOLDER}
scp ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/nohup.out ${OS_CTRL_FOLDER}/stack.log
scp ${OPENSTACK_CONTROL_NODE_IP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_CTRL_FOLDER}/ovs-vswitchd.log
+scp ${OPENSTACK_CONTROL_NODE_IP}:/etc/neutron/neutron.conf ${OS_CTRL_FOLDER}/neutron.conf
rsync -avhe ssh ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/logs/* ${OS_CTRL_FOLDER} # rsync to prevent copying of symbolic links
scp extra_debug.sh ${OPENSTACK_CONTROL_NODE_IP}:/tmp
${SSH} ${OPENSTACK_CONTROL_NODE_IP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
mkdir -p ${OS_COMPUTE_FOLDER}
scp ${!OSIP}:/opt/stack/devstack/nohup.out ${OS_COMPUTE_FOLDER}/stack.log
scp ${!OSIP}:/var/log/openvswitch/ovs-vswitchd.log ${OS_COMPUTE_FOLDER}/ovs-vswitchd.log
+ scp ${!OSIP}:/etc/nova/nova.conf ${OS_COMPUTE_FOLDER}/nova.conf
rsync -avhe ssh ${!OSIP}:/opt/stack/logs/* ${OS_COMPUTE_FOLDER} # rsync to prevent copying of symbolic links
scp extra_debug.sh ${!OSIP}:/tmp
${SSH} ${!OSIP} "bash /tmp/extra_debug.sh > /tmp/extra_debug.log"
create_control_node_local_conf
scp ${WORKSPACE}/local.conf_control ${OPENSTACK_CONTROL_NODE_IP}:/opt/stack/devstack/local.conf
+cat > "${WORKSPACE}/manual_install_package.sh" << EOF
+cd /opt/stack
+git clone "\$1"
+cd "\$2"
+git checkout "\$3"
+sudo python setup.py install
+
+EOF
+
# Workaround for successful stacking with Mitaka
if [ "${ODL_ML2_BRANCH}" == "stable/mitaka" ]; then
# the problem has been solved with version 1.17. If the latest version of paramiko is used, it causes
# other timeout problems
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/mitaka; sed -i /openstacksdk/d upper-constraints.txt; sed -i /libvirt-python/d upper-constraints.txt; sed -i /paramiko/d upper-constraints.txt"
+ scp "${WORKSPACE}/manual_install_package.sh" "${OPENSTACK_CONTROL_NODE_IP}:/tmp"
ssh ${OPENSTACK_CONTROL_NODE_IP} "sudo pip install deprecation"
- ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/openstack/python-openstacksdk; cd python-openstacksdk; sudo python setup.py install"
- ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://github.com/paramiko/paramiko; cd paramiko; git checkout 1.17; sudo python setup.py install"
+ # Fix for recent requirements update in the master branch of the sdk. The section must be replaced with a better fix.
+ ssh "${OPENSTACK_CONTROL_NODE_IP}" "sh /tmp/manual_install_package.sh https://github.com/openstack/python-openstacksdk python-openstacksdk 0.9.14"
+ ssh "${OPENSTACK_CONTROL_NODE_IP}" "sh /tmp/manual_install_package.sh https://github.com/paramiko/paramiko paramiko 1.17"
fi
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${OPENSTACK_CONTROL_NODE_IP} "cd /opt/stack; git clone https://git.openstack.org/openstack/requirements; cd requirements; git checkout stable/newton; sed -i /appdirs/d upper-constraints.txt"
fi
-
for i in `seq 1 $((NUM_OPENSTACK_SYSTEM - 1))`
do
COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP
# Control Node - PUBLIC_BRIDGE will act as the external router
GATEWAY_IP="10.10.10.250" # FIXME this should be a parameter, also shared with integration-test
-${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig $PUBLIC_BRIDGE up ${GATEWAY_IP}/24"
+${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ip link add link ${PUBLIC_BRIDGE} name ${PUBLIC_BRIDGE}.167 type vlan id 167"
+${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig ${PUBLIC_BRIDGE} up"
+${SSH} ${OPENSTACK_CONTROL_NODE_IP} "sudo ifconfig ${PUBLIC_BRIDGE}.167 up ${GATEWAY_IP}/24"
compute_index=1
for compute_ip in ${COMPUTE_IPS[*]}
do
-v OS_COMPUTE_2_IP:${OPENSTACK_COMPUTE_NODE_2_IP} \
-v OS_USER:${USER} \
-v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
+ -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
-v TOOLS_SYSTEM_IP:${TOOLS_SYSTEM_1_IP} \
-v TOOLS_SYSTEM_1_IP:${TOOLS_SYSTEM_1_IP} \
-v TOOLS_SYSTEM_2_IP:${TOOLS_SYSTEM_2_IP} \
echo "Configuring the startup features..."
FEATURESCONF=${WORKSPACE}/${BUNDLEFOLDER}/etc/org.apache.karaf.features.cfg
-sed -ie "s/featuresBoot=.*/featuresBoot=config,standard,region,package,kar,ssh,management,${ACTUALFEATURES}/g" ${FEATURESCONF}
+sed -ie "s/\(featuresBoot=\|featuresBoot =\)/featuresBoot = ${ACTUALFEATURES},/g" ${FEATURESCONF}
sed -ie "s%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features%mvn:org.opendaylight.integration/features-integration-index/${BUNDLEVERSION}/xml/features,mvn:org.opendaylight.integration/features-integration-test/${BUNDLEVERSION}/xml/features%g" ${FEATURESCONF}
cat ${FEATURESCONF}
BUNDLEFOLDER="distribution-karaf-${BUNDLEVERSION}"
BUNDLE="distribution-karaf-${TIMESTAMP}.zip"
ACTUALBUNDLEURL="${NEXUSPATH}/${BUNDLEVERSION}/${BUNDLE}"
-else
+elif [[ ${BUNDLEURL} == *"distribution-check"* ]]; then
ACTUALBUNDLEURL="${BUNDLEURL}"
BUNDLE="${BUNDLEURL##*/}"
BUNDLEFOLDER="${BUNDLE//.zip}"
BUNDLEVERSION="${BUNDLEFOLDER//distribution-karaf-}"
+else
+ ACTUALBUNDLEURL="${BUNDLEURL}"
+ BUNDLE="${BUNDLEURL##*/}"
+ BUNDLEVERSION="$(basename $(dirname $BUNDLEURL))"
+ BUNDLEFOLDER="distribution-karaf-${BUNDLEVERSION}"
fi
if [ ${JDKVERSION} == 'openjdk8' ]; then
pip install --upgrade docker-py importlib requests scapy netifaces netaddr ipaddr pyhocon
pip install --upgrade robotframework{,-{httplibrary,requests,sshlibrary,selenium2library}}
+pip install --upgrade robotframework-pycurllibrary
# Module jsonpath is needed by current AAA idmlite suite.
pip install --upgrade jsonpath-rw
# Module for iso8601 datetime format
pip install isodate
+# Modules for tornado and jsonpointer used by client libraries of IoTDM project
+# Note: Could be removed when client running on tools VM is used instead
+# of client libraries only.
+pip install --upgrade tornado jsonpointer
+
+# Module for TemplatedRequests.robot library
+pip install --upgrade jmespath
+
# Print installed versions.
pip freeze
---
# TODO: Make all bash constants more readable.
# e.g.: DISTRIBUTION_BRANCH instead of DISTROBRANCH.
-
- parameter:
name: integration-distribution-branch
parameters:
server-name: '{server}'
trigger-on:
- patchset-created-event:
- exclude-drafts: 'true'
- exclude-trivial-rebase: 'false'
- exclude-no-code-change: 'true'
+ exclude-drafts: true
+ exclude-trivial-rebase: false
+ exclude-no-code-change: false
- draft-published-event
- comment-added-contains-event:
- comment-contains-value: 'recheck'
+ comment-contains-value: recheck
override-votes: true
gerrit-build-unstable-verified-value: +1
gerrit-build-unstable-codereview-value: 0
projects:
- - project-compare-type: 'ANT'
+ - project-compare-type: ANT
project-pattern: '{project}'
branches:
- - branch-compare-type: 'ANT'
+ - branch-compare-type: ANT
branch-pattern: '**/{branch}'
file-paths:
- - compare-type: 'ANT'
+ - compare-type: ANT
pattern: '{files}'
# Macro: integration-openstack-controller-mininet
name: ODL_ML2_DRIVER_VERSION
default: '{odl-ml2-driver-version}'
description: 'Mode of networking-odl (v1 or v2)'
+ - string:
+ name: ODL_ML2_PORT_BINDING
+ default: '{odl-ml2-port-binding}'
+ description: 'Method of networking-odl port-binding (pseudo-agentdb-binding or legacy-port-binding or
+ network-topology)'
+ - string:
+ name: DEVSTACK_KUBERNETES_PLUGIN_REPO
+ default: '{devstack-kubernetes-plugin-repo}'
+ description: 'URL to fetch kubernetes devstack plugin'
+ - string:
+ name: DEVSTACK_LBAAS_PLUGIN_REPO
+ default: '{devstack-lbaas-plugin-repo}'
+ description: 'URL to fetch neutron-lbaas devstack plugin'
- string:
name: ODL_ENABLE_L3_FWD
default: '{odl-enable-l3}'
name: ENABLE_OS_SERVICES
default: '{enable-openstack-services}'
description: 'comma separated list of services to enable'
+ - string:
+ name: ENABLE_OS_PLUGINS
+ default: '{enable-openstack-plugins}'
+ description: 'comma separated list of plugins to enable'
- string:
name: DISABLE_OS_SERVICES
default: '{disable-openstack-services}'
name: DISABLE_ODL_L3_PLUGIN
default: '{disable-odl-l3-service-plugin}'
description: 'Disable odl l3 service plugin'
+ - string:
+ name: CREATE_INITIAL_NETWORKS
+ default: '{create-initial-networks}'
+ description: 'Toggles the option of letting devstack create initial networks (True/False)'
+ - string:
+ name: LBAAS_SERVICE_PROVIDER
+ default: '{lbaas-service-provider}'
+ description: 'The NEUTRON_LBAAS_SERVICE_PROVIDERV2 value to be used in local.conf - only relevant when using
+ neutron-lbaas'
scm:
- integration-gerrit-scm:
basedir: 'test'
name: ODL_ML2_DRIVER_VERSION
default: '{odl-ml2-driver-version}'
description: 'Mode of networking-odl (v1 or v2)'
+ - string:
+ name: ODL_ML2_PORT_BINDING
+ default: '{odl-ml2-port-binding}'
+ description: 'Method of networking-odl port-binding (pseudo-agentdb-binding or legacy-port-binding or
+ network-topology)'
+ - string:
+ name: DEVSTACK_KUBERNETES_PLUGIN_REPO
+ default: '{devstack-kubernetes-plugin-repo}'
+ description: 'URL to fetch kubernetes devstack plugin'
+ - string:
+ name: DEVSTACK_LBAAS_PLUGIN_REPO
+ default: '{devstack-lbaas-plugin-repo}'
+ description: 'URL to fetch neutron-lbaas devstack plugin'
- string:
name: ODL_ENABLE_L3_FWD
default: '{odl-enable-l3}'
name: ENABLE_OS_SERVICES
default: '{enable-openstack-services}'
description: 'comma separated list of services to enable'
+ - string:
+ name: ENABLE_OS_PLUGINS
+ default: '{enable-openstack-plugins}'
+ description: 'comma separated list of plugins to enable'
- string:
name: DISABLE_OS_SERVICES
default: '{disable-openstack-services}'
name: DISABLE_ODL_L3_PLUGIN
default: '{disable-odl-l3-service-plugin}'
description: 'Disable odl l3 service plugin'
-
+ - string:
+ name: CREATE_INITIAL_NETWORKS
+ default: '{create-initial-networks}'
+ description: 'Toggles the option of letting devstack create initial networks (True/False)'
+ - string:
+ name: LBAAS_SERVICE_PROVIDER
+ default: '{lbaas-service-provider}'
+ description: 'The NEUTRON_LBAAS_SERVICE_PROVIDERV2 value to be used in local.conf - only relevant when using
+ neutron-lbaas'
scm:
- integration-gerrit-scm:
basedir: 'test'
maven-version: 'mvn33'
pom: '{project}/pom.xml'
goals: >
- clean install dependency:tree -V -B -Djenkins
- -DskipTests
- -Dcheckstyle.skip=true
- -Dmaven.javadoc.skip=true
- -Dmaven.site.skip=true
+ clean install dependency:tree
+ -V -B -Pq
+ -Dgitid.skip=false
+ -Djenkins
-DgenerateReports=false
-Dmaven.repo.local=/tmp/r -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
-Dstream={stream}
maven-name: 'mvn33'
root-pom: 'distribution/pom.xml'
goals: >
- clean install dependency:tree -V -B -Djenkins
+ clean install dependency:tree
+ -V -B -Pq
+ -Dgitid.skip=false
+ -Djenkins
-Dmaven.repo.local=/tmp/r -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
maven-opts: '-Xmx1024m -XX:MaxPermSize=256m -Dmaven.compile.fork=true'
settings: 'integration-settings'
name: integration-test
project: integration/test
project-name: integration-test
+ test-branch: master
jobs:
- - '{project-name}-verify-tox-{stream}'
+ - '{project-name}-verify-tox-master'
- 'integration-csit-verify-1node-library'
- 'integration-distribution-test-{stream}'
- 'integration-patch-test-{stream}'
- 'integration-multipatch-test-{stream}'
+ - 'integration-distribution-weekly-test-trigger-{stream}'
# CSIT Lists in releng-defaults.yaml
stream:
jdk: 'openjdk8'
jre: 'openjdk8'
csit-list: '{csit-list-carbon}'
+ csit-weekly-list: '{csit-weekly-list-carbon}'
schedule: 'H H * * 0-4'
+ schedule-weekly: 'H 12 * * 0'
- boron:
branch: 'stable/boron'
jdk: 'openjdk8'
jre: 'openjdk8'
csit-list: '{csit-list-boron}'
+ csit-weekly-list: ''
schedule: 'H H * * 0-4'
+ schedule-weekly: 'H 12 * * 0'
- beryllium:
branch: 'stable/beryllium'
jdk: 'openjdk7'
jre: 'openjdk7'
csit-list: '{csit-list-beryllium}'
+ csit-weekly-list: ''
schedule: 'H H * * 6'
+ schedule-weekly: 'H 12 * * 0'
# tools system image
tools_system_image: Ubuntu 14.04 - mininet-ovs-25 - 20170210-0300
branch: stable/boron
scope: only
jre: openjdk8
- # Integration/Test branch remains hardcoded to 'master' as int/test does not have any other branch.
- test-branch: master
properties:
- opendaylight-infra-properties:
- email-notification:
email-recipients: '{email-recipients}'
email-prefix: '[int/dist]'
+
+- job-template:
+ name: 'integration-distribution-weekly-test-trigger-{stream}'
+
+ project-type: freestyle
+ node: centos7-robot-2c-2g
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+
+ parameters:
+ - integration-bundleurl:
+ bundleurl: '{bundleurl}'
+ - integration-jdk-version:
+ jdkversion: '{jre}'
+
+ triggers:
+ - timed: '{schedule-weekly}'
+
+ builders:
+ - trigger-builds:
+ - project: '{csit-weekly-list}'
+ block: true
+ predefined-parameters:
+ BUNDLEURL=$BUNDLEURL
+
+ publishers:
+ - email-notification:
+ email-recipients: '{email-recipients}'
+ email-prefix: '[integration]'
scope: 'all'
# Features to install
- install-features: 'odl-onem2mall-iotdm'
+ install-features: 'odl-iotdmcsitdist'
# Robot custom options
robot-options: ''
# Features to install
# odl-jolokia is added automatically just because this is a 3node job.
# Netconf clustered feature is installed at runtime.
- install-features: 'odl-netconf-ssh,odl-restconf'
+ install-features: >
+ odl-restconf,
+ odl-netconf-ssh
# Robot custom options
robot-options: '-v USE_NETCONF_CONNECTOR:False'
scope: 'all'
# Features to install
- install-features: 'odl-restconf,odl-netconf-clustered-topology'
+ install-features: >
+ odl-restconf,
+ odl-netconf-clustered-topology
# Robot custom options
robot-options: '-v USE_NETCONF_CONNECTOR:False'
scope: 'all'
# Features to install
- install-features: 'odl-restconf,odl-netconf-clustered-topology'
+ install-features: >
+ odl-restconf,
+ odl-netconf-clustered-topology
# Robot custom options
robot-options: '-v USE_NETCONF_CONNECTOR:False'
set -e
echo "---> Cleaning up OVS $OVS_VERSION"
-docker logs $CID > $WORKSPACE/docker-ovs-${OVS_VERSION}.log
-docker stop $CID
-docker rm $CID
+docker logs "$CID" > "$WORKSPACE/docker-ovs-${OVS_VERSION}.log"
+docker stop "$CID"
+docker rm "$CID"
rm env.properties
docker images
#
# https://github.com/openstack-infra/project-config/blob/master/jenkins/jobs/networking-odl.yaml
-export PATH=$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin
+export PATH="$PATH:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin"
# *SIGH*. This is required to get lsb_release
sudo yum -y install redhat-lsb-core indent python-testrepository
sudo bash -c 'echo "stack ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers'
# We need to install some scripts from openstack/os-testr project
-cd ~
+cd ~ || exit 1
echo "Setting up infra scripts"
sudo mkdir -p /usr/local/jenkins/slave_scripts
git clone https://github.com/openstack/os-testr.git
-cd os-testr/os_testr
+cd os-testr/os_testr || exit 1
sudo cp subunit2html.py /usr/local/jenkins/slave_scripts
# Save existing WORKSPACE
-SAVED_WORKSPACE=$WORKSPACE
+SAVED_WORKSPACE="$WORKSPACE"
export WORKSPACE=~/workspace
-mkdir -p $WORKSPACE
-cd $WORKSPACE
+mkdir -p "$WORKSPACE"
+cd "$WORKSPACE" || exit 1
# This is the job which checks out devstack-gate
if [[ ! -e devstack-gate ]]; then
git clone https://git.openstack.org/openstack-infra/devstack-gate
else
echo "Fixing devstack-gate git remotes"
- cd devstack-gate
+ cd devstack-gate || exit 1
git remote set-url origin https://git.openstack.org/openstack-infra/devstack-gate
git remote update
git reset --hard
# Set the pieces we want to test
if [ "$GERRIT_PROJECT" == "openstack/neutron" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
elif [ "$GERRIT_PROJECT" == "openstack-dev/devstack" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
fi
echo "Setting environment variables"
DEVSTACK_LOCAL_CONFIG+="ODL_JAVA_MAX_PERM_MEM=784m;"
# Set ODL_URL_PREFIX if "nexus proxy" is provided
-URL_PREFIX=${ODLNEXUSPROXY:-https://nexus.opendaylight.org}
+export URL_PREFIX="${ODLNEXUSPROXY:-https://nexus.opendaylight.org}"
if [ -n "$ODLNEXUSPROXY" ] ; then
DEVSTACK_LOCAL_CONFIG+="ODL_URL_PREFIX=$ODLNEXUSPROXY;"
fi
DGRET=$?
# Restore WORKSPACE
-OS_WORKSPACE=$WORKSPACE
-export WORKSPACE=$SAVED_WORKSPACE
+OS_WORKSPACE="$WORKSPACE"
+export WORKSPACE="$SAVED_WORKSPACE"
# Copy and display all the logs
cat /opt/stack/new/devstacklog*
ls /opt/stack/; ls /opt/stack/new; ls /opt/stack/new/opendaylight;
-cp -r $OS_WORKSPACE/logs $WORKSPACE
-cp -a /opt/stack/new/logs/screen-odl-karaf* $WORKSPACE/logs
-mkdir -p $WORKSPACE/logs/opendaylight
-cp -a /opt/stack/new/opendaylight/distribution*/etc $WORKSPACE/logs/opendaylight
+cp -r "$OS_WORKSPACE/logs" "$WORKSPACE"
+cp -a /opt/stack/new/logs/screen-odl-karaf* "$WORKSPACE/logs"
+mkdir -p "$WORKSPACE/logs/opendaylight"
+cp -a /opt/stack/new/opendaylight/distribution*/etc "$WORKSPACE/logs/opendaylight"
# Unzip the logs to make them easier to view
-gunzip $WORKSPACE/logs/*.gz
+gunzip "$WORKSPACE"/logs/*.gz
-exit $DGRET
+exit "$DGRET"
set -e
-OVS_VERSION=${OVS_VERSION:-2.5.0}
+OVS_VERSION="${OVS_VERSION:-2.5.0}"
echo "---> Cleaning up existing Docker processes and images"
for x in $(docker ps -a -q)
echo "---> Starting OVS $OVS_VERSION"
-/usr/bin/docker pull vpickard/openvswitch:$OVS_VERSION
-CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t vpickard/openvswitch:$OVS_VERSION /usr/bin/supervisord)
-REALCID=`echo $CID | rev | cut -d ' ' -f 1 | rev`
+/usr/bin/docker pull "vpickard/openvswitch:$OVS_VERSION"
+CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t "vpickard/openvswitch:$OVS_VERSION" /usr/bin/supervisord)
+REALCID=$(echo "$CID" | rev | cut -d ' ' -f 1 | rev)
echo "CID=$REALCID" > env.properties
echo "OVS_VERSION=${OVS_VERSION}" >> env.properties
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" >> env.properties
echo "---> Waiting..."
set -e
echo "---> Configuring OVS for HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl stop ovsdb-server
-/usr/bin/docker exec $CID supervisorctl start ovsdb-server-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-br br-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl add-ps br-vtep
-/usr/bin/docker exec $CID vtep-ctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
-/usr/bin/docker exec $CID vtep-ctl set-manager ptcp:6640
+/usr/bin/docker exec "$CID" supervisorctl stop ovsdb-server
+/usr/bin/docker exec "$CID" supervisorctl start ovsdb-server-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-br br-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl add-ps br-vtep
+/usr/bin/docker exec "$CID" vtep-ctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
+/usr/bin/docker exec "$CID" vtep-ctl set-manager ptcp:6640
sleep 5
echo "---> Starting OVS HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl start ovs-vtep
+/usr/bin/docker exec "$CID" supervisorctl start ovs-vtep
sleep 5
set -e
echo "---> Setting up controller IP"
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" > env.properties
echo "---> Loading OVS kernel module"
echo "---> Verifying OVS kernel module loaded"
/usr/sbin/lsmod | /usr/bin/grep openvswitch
-
- mitaka:
openstack-branch: 'stable/mitaka'
odl-ml2-branch: 'stable/mitaka'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170210-1356'
+ openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170314-2255'
- newton:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-newton - 20170210-1344'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
- newton-nodl-v2:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
odl-ml2-driver-version: 'v2'
- openstack_system_image: 'CentOS 7 - devstack-newton - 20170210-1344'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
tools_system_count: 2
tools_system_image: 'Ubuntu 14.04 - mininet-ovs-25 - 20170210-0300'
- odl-enable-l3: 'yes'
-
enable-networking-l2gw: 'yes'
disable-odl-l3-service-plugin: 'yes'
- public-bridge: 'br-physnet1'
-
- public-physical-network: 'physnet1'
-
enable-openstack-services: 'q-svc,q-dhcp,q-meta,n-cauth,tempest,l2gw-plugin'
- disable-openstack-services: 'swift,cinder,n-net,q-vpn,n-cpu'
-
- tenant-network-type: 'vxlan'
-
security-group-mode: '{sg-mode}'
-
- robot-options: ''
functionality:
- upstream-transparent:
- schedule: 'H H/4 * * *'
+ schedule: 'H H * * *'
sg-mode: 'transparent'
- upstream-stateful:
- schedule: 'H H * * *'
+ schedule: 'H H/4 * * *'
sg-mode: 'stateful'
- upstream-learn:
schedule: 'H H * * *'
- mitaka:
openstack-branch: 'stable/mitaka'
odl-ml2-branch: 'stable/mitaka'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170210-1356'
+ openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170314-2255'
- newton:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-newton - 20170210-1344'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
- newton-nodl-v2:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
odl-ml2-driver-version: 'v2'
- openstack_system_image: 'CentOS 7 - devstack-newton - 20170210-1344'
-
- odl-enable-l3: 'yes'
-
- public-bridge: 'br-physnet1'
-
- public-physical-network: 'physnet1'
-
- enable-networking-l2gw: 'no'
-
- disable-odl-l3-service-plugin: 'no'
-
- enable-openstack-services: 'q-svc,q-dhcp,q-meta,n-cauth,tempest'
-
- disable-openstack-services: 'swift,cinder,n-net,q-vpn,n-cpu'
-
- tenant-network-type: 'vxlan'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
security-group-mode: '{sg-mode}'
-
- robot-options: ''
- mitaka:
openstack-branch: 'stable/mitaka'
odl-ml2-branch: 'stable/mitaka'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: CentOS 7 - devstack-mitaka - 20170210-1356
+ openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170314-2255'
- newton:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: CentOS 7 - devstack-newton - 20170210-1344
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
- newton-nodl-v2:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
odl-ml2-driver-version: 'v2'
- openstack_system_image: CentOS 7 - devstack-newton - 20170210-1344
-
- odl-enable-l3: 'yes'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
public-bridge: 'br-ex'
-
- public-physical-network: 'physnet1'
-
- enable-networking-l2gw: 'no'
-
- disable-odl-l3-service-plugin: 'no'
-
- enable-openstack-services: 'q-svc,q-dhcp,q-meta,n-cauth,tempest'
-
- disable-openstack-services: 'swift,cinder,n-net,q-vpn,n-cpu'
-
- tenant-network-type: 'vxlan'
-
- security-group-mode: 'none'
-
- robot-options: ''
sudo bash -c 'echo "stack ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers'
# We need to install some scripts from openstack-infra/project-config
-cd ~
+cd ~ || exit 1
echo "Setting up infra scripts"
sudo mkdir -p /usr/local/jenkins/slave_scripts
git clone https://git.openstack.org/openstack-infra/project-config
-cd project-config
+cd project-config || exit 1
sudo cp jenkins/scripts/subunit2html.py /usr/local/jenkins/slave_scripts
# Save existing WORKSPACE
SAVED_WORKSPACE=$WORKSPACE
export WORKSPACE=~/workspace
mkdir -p $WORKSPACE
-cd $WORKSPACE
+cd $WORKSPACE || exit 1
# This is the job which checks out devstack-gate
if [[ ! -e devstack-gate ]]; then
git clone https://git.openstack.org/openstack-infra/devstack-gate
else
echo "Fixing devstack-gate git remotes"
- cd devstack-gate
+ cd devstack-gate || exit 1
git remote set-url origin https://git.openstack.org/openstack-infra/devstack-gate
git remote update
git reset --hard
# Set the pieces we want to test
if [ "$GERRIT_PROJECT" == "openstack/neutron" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
elif [ "$GERRIT_PROJECT" == "openstack-dev/devstack" ]; then
- ZUUL_PROJECT=$GERRIT_PROJECT
- ZUUL_BRANCH=$GERRIT_REFSPEC
+ export ZUUL_PROJECT=$GERRIT_PROJECT
+ export ZUUL_BRANCH=$GERRIT_REFSPEC
fi
echo "Setting environment variables"
DEVSTACK_LOCAL_CONFIG+="ODL_JAVA_MAX_PERM_MEM=784m;"
# Set ODL_URL_PREFIX if "nexus proxy" is provided
-URL_PREFIX=${ODLNEXUSPROXY:-https://nexus.opendaylight.org}
+export URL_PREFIX="${ODLNEXUSPROXY:-https://nexus.opendaylight.org}"
if [ -n "$ODLNEXUSPROXY" ] ; then
DEVSTACK_LOCAL_CONFIG+="ODL_URL_PREFIX=$ODLNEXUSPROXY;"
fi
export WORKSPACE=$SAVED_WORKSPACE
# Copy all the logs
-cp -r $OS_WORKSPACE/logs $WORKSPACE
-cp -a /opt/stack/new/logs/q-odl-karaf* $WORKSPACE/logs
-mkdir -p $WORKSPACE/logs/opendaylight
-cp -a /opt/stack/new/opendaylight/distribution*/etc $WORKSPACE/logs/opendaylight
+cp -r "$OS_WORKSPACE/logs" "$WORKSPACE"
+cp -a /opt/stack/new/logs/q-odl-karaf* "$WORKSPACE/logs"
+mkdir -p "$WORKSPACE/logs/opendaylight"
+cp -a /opt/stack/new/opendaylight/distribution*/etc "$WORKSPACE/logs/opendaylight"
# Unzip the logs to make them easier to view
-gunzip $WORKSPACE/logs/*.gz
+gunzip "$WORKSPACE"/logs/*.gz
exit $DGRET
- '{project-name}-merge-{stream}'
- '{project-name}-validate-autorelease-{stream}'
- '{project-name}-verify-{stream}-{maven}-{jdks}'
+ - '{project-name}-verify-tox-{stream}'
stream: carbon
project: 'odlparent'
--- /dev/null
+#!/bin/bash
+LFTOOLS_DIR="$WORKSPACE/.venv-lftools"
+if [ ! -d "$LFTOOLS_DIR" ]
+then
+ virtualenv "$LFTOOLS_DIR"
+ # shellcheck disable=SC1090
+ source "$LFTOOLS_DIR/bin/activate"
+ pip install --upgrade pip
+ pip install "lftools>=0.0.10"
+ pip freeze
+fi
+# shellcheck disable=SC1090
+source "$LFTOOLS_DIR/bin/activate"
+
+lftools openstack --os-cloud odlpriv-sandbox \
+ image cleanup --hide-public=True \
+ --days=30 \
+ --clouds=odlpriv-sandbox,rackspace
#!/bin/bash
-virtualenv $WORKSPACE/.venv
-source $WORKSPACE/.venv/bin/activate
+virtualenv "$WORKSPACE/.venv"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv/bin/activate"
pip install --upgrade pip
pip install --upgrade python-openstackclient python-heatclient
pip freeze
#########################
# Fetch stack list before fetching active builds to minimize race condition
# where we might try to delete stacks while jobs are trying to start
-OS_STACKS=(`openstack --os-cloud rackspace stack list \
+OS_STACKS=($(openstack --os-cloud rackspace stack list \
-f json -c "Stack Name" -c "Stack Status" \
--property "stack_status=CREATE_COMPLETE" \
--property "stack_status=DELETE_FAILED" \
--property "stack_status=CREATE_FAILED" \
- | jq -r '.[] | ."Stack Name"'`)
+ | jq -r '.[] | ."Stack Name"'))
# Make sure we fetch active builds on both the releng and sandbox silos
ACTIVE_BUILDS=()
for silo in releng sandbox; do
JENKINS_URL="https://jenkins.opendaylight.org/$silo//computer/api/json?tree=computer[executors[currentExecutable[url]],oneOffExecutors[currentExecutable[url]]]&xpath=//url&wrapper=builds"
- wget --no-verbose -O $silo_builds.json $JENKINS_URL
+ wget --no-verbose -O "${silo}_builds.json" "$JENKINS_URL"
sleep 1 # Need to sleep for 1 second otherwise next line causes script to stall
- ACTIVE_BUILDS=(${ACTIVE_BUILDS[@]} ` \
- jq -r '.computer[].executors[].currentExecutable.url' $silo_builds.json \
- | grep -v null | awk -F'/' '{print $6 "-" $7}'`)
+ ACTIVE_BUILDS=(${ACTIVE_BUILDS[@]} $( \
+ jq -r '.computer[].executors[].currentExecutable.url' "${silo}_builds.json" \
+ | grep -v null | awk -F'/' '{print $6 "-" $7}'))
done
##########################
##########################
# Search for stacks that are not in use by either releng or sandbox silos and
# delete them.
-for stack in ${OS_STACKS[@]}; do
+for stack in "${OS_STACKS[@]}"; do
if [[ "${ACTIVE_BUILDS[@]}" =~ $stack ]]; then
# No need to delete stacks if there exists an active build for them
continue
else
echo "Deleting orphaned stack: $stack"
- openstack --os-cloud rackspace stack delete --yes $stack
+ openstack --os-cloud rackspace stack delete --yes "$stack"
fi
done
#!/bin/bash
echo "----------> Copy ssh public keys to csit lab"
-source $WORKSPACE/.venv-openstack/bin/activate
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv-openstack/bin/activate"
function copy-ssh-keys-to-slave() {
RETRIES=60
for j in $(seq 1 $RETRIES); do
+ # shellcheck disable=SC2092
if `ssh-copy-id -i /home/jenkins/.ssh/id_rsa.pub "jenkins@${i}" > /dev/null 2>&1`; then
- ssh jenkins@${i} 'echo "$(facter ipaddress_eth0) $(/bin/hostname)" | sudo tee -a /etc/hosts'
+ ssh "jenkins@${i}" 'echo "$(facter ipaddress_eth0) $(/bin/hostname)" | sudo tee -a /etc/hosts'
echo "Successfully copied public keys to slave ${i}"
break
- elif [ $j -eq $RETRIES ]; then
+ elif [ "$j" -eq $RETRIES ]; then
            echo "SSH not responding on ${i} after $RETRIES tries. Giving up."
exit 1
else
fi
# ping test to see if connectivity is available
- if ping -c1 ${i} &> /dev/null; then
+ if ping -c1 "${i}" &> /dev/null; then
echo "Ping to ${i} successful."
else
echo "Ping to ${i} failed."
# Print the Stack outputs parameters so that we can identify which IPs belong
# to which VM types.
-openstack --os-cloud rackspace stack show -c outputs $STACK_NAME
+openstack --os-cloud rackspace stack show -c outputs "$STACK_NAME"
-ADDR=(`openstack --os-cloud rackspace stack show -f json -c outputs $STACK_NAME | \
+# shellcheck disable=SC2006
+ADDR=(`openstack --os-cloud rackspace stack show -f json -c outputs "$STACK_NAME" | \
jq -r '.outputs[] | \
select(.output_key | match("^vm_[0-9]+_ips\$")) | \
.output_value | .[]'`)
# Detect when a process failed to copy ssh keys and fail build
for p in $pids; do
- if wait $p; then
+ if wait "$p"; then
echo "Process $p successfully copied ssh keys."
else
echo "Process $p failed to copy ssh keys."
#!/bin/bash
-CHANGE_ID=`ssh -p 29418 jenkins-$SILO@git.opendaylight.org gerrit query \
+# shellcheck disable=SC1083
+CHANGE_ID=$(ssh -p 29418 "jenkins-$SILO@git.opendaylight.org" gerrit query \
limit:1 owner:self is:open project:{project} \
message:'{gerrit-commit-message}' \
topic:{gerrit-topic} | \
grep 'Change-Id:' | \
- awk '{{ print $2 }}'`
+ awk '{{ print $2 }}')
if [ -z "$CHANGE_ID" ]; then
git commit -sm "{gerrit-commit-message}"
fi
git status
-git remote add gerrit ssh://jenkins-$SILO@git.opendaylight.org:29418/releng/builder.git
+git remote add gerrit "ssh://jenkins-$SILO@git.opendaylight.org:29418/releng/builder.git"
# Don't fail the build if this command fails because it's possible that there
# are no changes since the last update.
+# shellcheck disable=SC1083
git review --yes -t {gerrit-topic} || true
#!/bin/bash
-virtualenv $WORKSPACE/.venv-openstack
-source $WORKSPACE/.venv-openstack/bin/activate
+virtualenv "$WORKSPACE/.venv-openstack"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv-openstack/bin/activate"
pip install --upgrade pip
pip install --upgrade python-openstackclient python-heatclient
pip freeze
-cd /builder/openstack-hot
+cd /builder/openstack-hot || exit 1
-JOB_SUM=`echo $JOB_NAME | sum | awk '{{ print $1 }}'`
+JOB_SUM=$(echo "$JOB_NAME" | sum | awk '{{ print $1 }}')
VM_NAME="$JOB_SUM-$BUILD_NUMBER"
OS_TIMEOUT=10 # Minutes to wait for OpenStack VM to come online
openstack --os-cloud rackspace limits show --absolute
openstack --os-cloud rackspace limits show --rate
echo "Trying up to $STACK_RETRIES times to create $STACK_NAME."
-for try in `seq $STACK_RETRIES`; do
- openstack --os-cloud rackspace stack create --timeout $OS_TIMEOUT -t {stack-template} -e $WORKSPACE/opendaylight-infra-environment.yaml --parameter "job_name=$VM_NAME" --parameter "silo=$SILO" $STACK_NAME
+for try in $(seq $STACK_RETRIES); do
+ # shellcheck disable=SC1083
+ openstack --os-cloud rackspace stack create --timeout "$OS_TIMEOUT" -t {stack-template} -e "$WORKSPACE/opendaylight-infra-environment.yaml" --parameter "job_name=$VM_NAME" --parameter "silo=$SILO" "$STACK_NAME"
openstack --os-cloud rackspace stack list
- echo "Waiting for $OS_TIMEOUT minutes to create $STACK_NAME."
- for i in `seq $OS_TIMEOUT`; do
+ echo "$try: Waiting for $OS_TIMEOUT minutes to create $STACK_NAME."
+ for i in $(seq $OS_TIMEOUT); do
sleep 60
- OS_STATUS=`openstack --os-cloud rackspace stack show -f json -c stack_status $STACK_NAME | jq -r '.stack_status'`
+ OS_STATUS=$(openstack --os-cloud rackspace stack show -f json -c stack_status "$STACK_NAME" | jq -r '.stack_status')
+ echo "$i: $OS_STATUS"
case "$OS_STATUS" in
CREATE_COMPLETE)
CREATE_FAILED)
echo "ERROR: Failed to initialize infrastructure. Deleting stack and possibly retrying to create..."
openstack --os-cloud rackspace stack list
- openstack --os-cloud rackspace stack delete --yes $STACK_NAME
- openstack --os-cloud rackspace stack show $STACK_NAME
+ openstack --os-cloud rackspace stack delete --yes "$STACK_NAME"
+ openstack --os-cloud rackspace stack show "$STACK_NAME"
# after stack delete, poll for 10m to know when stack is fully removed
# the logic here is that when "stack show $STACK_NAME" does not contain $STACK_NAME
# we assume it's successfully deleted and we can break to retry
- for i in `seq 20`; do
+ for j in $(seq 20); do
sleep 30;
- STACK_SHOW=$(openstack --os-cloud rackspace stack show $STACK_NAME)
- echo $STACK_SHOW
+ STACK_SHOW=$(openstack --os-cloud rackspace stack show "$STACK_NAME")
+ echo "$j: $STACK_SHOW"
if [[ $STACK_SHOW == *"DELETE_FAILED"* ]]; then
echo "stack delete failed. trying to stack abandon now"
- openstack --os-cloud rackspace stack abandon $STACK_NAME
- STACK_SHOW=$(openstack --os-cloud rackspace stack show $STACK_NAME)
- echo $STACK_SHOW
+ openstack --os-cloud rackspace stack abandon "$STACK_NAME"
+ STACK_SHOW=$(openstack --os-cloud rackspace stack show "$STACK_NAME")
+ echo "$STACK_SHOW"
fi
if [[ $STACK_SHOW != *"$STACK_NAME"* ]]; then
echo "stack show on $STACK_NAME came back empty. Assuming successful delete"
done
# capture stack info in console logs
-openstack --os-cloud rackspace stack show $STACK_NAME
+openstack --os-cloud rackspace stack show "$STACK_NAME"
if ! $STACK_SUCCESSFUL; then
exit 1
#!/bin/bash
-virtualenv $WORKSPACE/.venv
-source $WORKSPACE/.venv/bin/activate
+virtualenv "$WORKSPACE/.venv"
+# shellcheck disable=SC1090
+source "$WORKSPACE/.venv/bin/activate"
pip install --upgrade --quiet pip
pip install --upgrade --quiet python-openstackclient python-heatclient
pip freeze
-cat > $WORKSPACE/docs/cloud-images.rst << EOF
+cat > "$WORKSPACE/docs/cloud-images.rst" << EOF
Following are the list of published images available to be used with Jenkins jobs.
EOF
# Blank line before EOF is on purpose to ensure there is spacing.
IFS=$'\n'
-IMAGES=(`openstack --os-cloud odlpriv image list --public -f value -c Name`)
-for i in ${IMAGES[@]}; do
- echo "* $i" >> $WORKSPACE/docs/cloud-images.rst
+IMAGES=($(openstack --os-cloud odlpriv image list --public -f value -c Name))
+for i in "${IMAGES[@]}"; do
+ echo "* $i" >> "$WORKSPACE/docs/cloud-images.rst"
done
git add docs/cloud-images.rst
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-bulk-matic-ds-daily-old
- jobs:
- - '{project}-csit-1node-periodic-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'bulk-matic-ds-daily'
-
- # Project branches
- stream:
- - beryllium:
- branch: 'stable/beryllium'
- jre: 'openjdk7'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-app-bulk-o-matic'
-
- # Robot custom options
- robot-options: '-v flnr:50000'
-
- # Plot Info
- 01-plot-title: 'REST setup rate (for 100k flows)'
- 01-plot-yaxis: 'setup time[s], req[flows/s]'
- 01-plot-group: 'Performance Rate'
- 01-plot-data-file: 'flows_setup_time.csv'
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-bulk-matic-ds-daily
- jobs:
- - '{project}-csit-1node-periodic-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'bulk-matic-ds-daily'
-
- # Project branches
- stream:
- - carbon:
- branch: 'master'
- jre: 'openjdk8'
- - boron:
- branch: 'stable/boron'
- jre: 'openjdk8'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-app-bulk-o-matic'
-
- # Robot custom options
- robot-options: '-v flnr:50000'
-
- # Plot Info
- 01-plot-title: 'REST setup rate (for 100k flows)'
- 01-plot-yaxis: 'setup time[s], req[flows/s]'
- 01-plot-group: 'Performance Rate'
- 01-plot-data-file: 'flows_setup_time.csv'
project: 'openflowplugin'
# The functionality under test
- functionality: 'bulkomatic-clustering-daily'
+ functionality:
+ - bulkomatic-clustering-daily
+ - gate-bulkomatic-clustering-daily
# Project branches
stream:
project: 'openflowplugin'
# The functionality under test
- functionality: 'bulkomatic-clustering-perf-daily'
+ functionality:
+ - bulkomatic-clustering-perf-daily
+ - gate-bulkomatic-clustering-perf-daily
# Project branches
stream:
project: 'openflowplugin'
# The functionality under test
- functionality: 'bulkomatic-perf-daily'
+ functionality:
+ - bulkomatic-perf-daily
+ - gate-bulkomatic-perf-daily
# Project branches
stream:
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-cbench-performance-old
- jobs:
- - '{project}-csit-1node-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'cbench-performance'
-
- # Project branches
- stream:
- - beryllium:
- branch: 'stable/beryllium'
- jre: 'openjdk7'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-drop-test'
-
- # Robot custom options
- robot-options: '-v throughput_threshold:20000 -v latency_threshold:5000'
-
- # Plot Info
- 01-plot-title: 'Throughput Mode'
- 01-plot-yaxis: 'flow_mods/sec'
- 01-plot-group: 'Cbench Performance'
- 01-plot-data-file: 'throughput.csv'
- 02-plot-title: 'Latency Mode'
- 02-plot-yaxis: 'flow_mods/sec'
- 02-plot-group: 'Cbench Performance'
- 02-plot-data-file: 'latency.csv'
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-cbench-performance
- jobs:
- - '{project}-csit-1node-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'cbench-performance'
-
- # Project branches
- stream:
- - carbon:
- branch: 'master'
- jre: 'openjdk8'
- - boron:
- branch: 'stable/boron'
- jre: 'openjdk8'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-drop-test'
-
- # Robot custom options
- robot-options: '-v throughput_threshold:20000 -v latency_threshold:5000'
-
- # Plot Info
- 01-plot-title: 'Throughput Mode'
- 01-plot-yaxis: 'flow_mods/sec'
- 01-plot-group: 'Cbench Performance'
- 01-plot-data-file: 'throughput.csv'
- 02-plot-title: 'Latency Mode'
- 02-plot-yaxis: 'flow_mods/sec'
- 02-plot-group: 'Cbench Performance'
- 02-plot-data-file: 'latency.csv'
name: openflowplugin-csit-clustering
jobs:
- '{project}-csit-3node-{functionality}-{install}-{stream}'
- - '{project}-csit-verify-3node-{functionality}'
# The project name
project: 'openflowplugin'
# The functionality under test
- functionality: 'clustering'
+ functionality:
+ - clustering
+ - gate-clustering
# Project branches
stream:
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-config-performance-old
- jobs:
- - '{project}-csit-1node-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'config-performance'
-
- # Project branches
- stream:
- - beryllium:
- branch: 'stable/beryllium'
- jre: 'openjdk7'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui'
-
- # Robot custom options
- robot-options: ''
-
- # Plot Info
- 01-plot-title: 'Flow REST API Rate'
- 01-plot-yaxis: 'http_req/sec'
- 01-plot-group: 'Flow Config Performance'
- 01-plot-data-file: 'rates.csv'
- 02-plot-title: 'Flow Stats Collect Time'
- 02-plot-yaxis: 'seconds'
- 02-plot-group: 'Stats Collect Performance'
- 02-plot-data-file: 'times.csv'
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-config-performance
- jobs:
- - '{project}-csit-1node-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'config-performance'
-
- # Project branches
- stream:
- - carbon:
- branch: 'master'
- jre: 'openjdk8'
- - boron:
- branch: 'stable/boron'
- jre: 'openjdk8'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui'
-
- # Robot custom options
- robot-options: ''
-
- # Plot Info
- 01-plot-title: 'Flow REST API Rate'
- 01-plot-yaxis: 'http_req/sec'
- 01-plot-group: 'Flow Config Performance'
- 01-plot-data-file: 'rates.csv'
- 02-plot-title: 'Flow Stats Collect Time'
- 02-plot-yaxis: 'seconds'
- 02-plot-group: 'Stats Collect Performance'
- 02-plot-data-file: 'times.csv'
name: openflowplugin-csit-flow-services
jobs:
- '{project}-csit-1node-{functionality}-{install}-{stream}'
- - '{project}-csit-verify-1node-{functionality}'
# The project name
project: 'openflowplugin'
# The functionality under test
- functionality: 'flow-services'
+ functionality:
+ - flow-services
+ - gate-flow-services
# Project branches
stream:
+++ /dev/null
----
-- project:
- name: openflowplugin-csit-rpc-time-measure-daily-old
- jobs:
- - '{project}-csit-1node-periodic-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'openflowplugin'
-
- # The functionality under test
- functionality: 'rpc-time-measure-daily'
-
- # Project branches
- stream:
- - beryllium:
- branch: 'stable/beryllium'
- jre: 'openjdk7'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-app-bulk-o-matic'
-
- # Robot custom options
- robot-options: ''
-
- # Plot Info
- 01-plot-title: 'REST setup rate (for 100k flows)'
- 01-plot-yaxis: 'setup time[s], req[flows/s]'
- 01-plot-group: 'Performance Rate'
- 01-plot-data-file: 'flows_setup_time.csv'
---
- project:
- name: openflowplugin-csit-rpc-time-measure-daily
+ name: openflowplugin-csit-sanity
jobs:
- - '{project}-csit-1node-periodic-{functionality}-{install}-{stream}'
+ - '{project}-csit-1node-{functionality}-{install}-{stream}'
+ - '{project}-csit-verify-1node-{functionality}'
# The project name
project: 'openflowplugin'
# The functionality under test
- functionality: 'rpc-time-measure-daily'
+ functionality: 'sanity'
# Project branches
stream:
scope: 'only'
# Features to install
- install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-app-bulk-o-matic'
+ install-features: 'odl-openflowplugin-flow-services-ui,odl-openflowplugin-app-table-miss-enforcer'
# Robot custom options
robot-options: ''
- # Plot Info
- 01-plot-title: 'REST setup rate (for 100k flows)'
- 01-plot-yaxis: 'setup time[s], req[flows/s]'
- 01-plot-group: 'Performance Rate'
- 01-plot-data-file: 'flows_setup_time.csv'
+ # mininet image
+ tools_system_image: Ubuntu 14.04 - mininet-ovs-25 - 20170210-0300
project: 'openflowplugin'
# The functionality under test
- functionality: 'scalability'
+ functionality:
+ - scalability
+ - gate-scalability
# Project branches
stream:
project: 'openflowplugin'
# The functionality under test
- functionality: 'scale-stats-collection-daily'
+ functionality:
+ - scale-stats-collection-daily
+ - gate-scale-stats-collection-daily
# Project branches
stream:
feature:
- core:
csit-list: >
- openflowplugin-csit-1node-flow-services-only-{stream},
- openflowplugin-csit-1node-flow-services-all-{stream},
- openflowplugin-csit-1node-scalability-only-{stream},
- openflowplugin-csit-1node-cbench-performance-only-{stream},
- openflowplugin-csit-1node-config-performance-only-{stream},
- openflowplugin-csit-3node-clustering-only-{stream}
+ openflowplugin-csit-1node-gate-flow-services-only-{stream},
+ openflowplugin-csit-1node-gate-flow-services-all-{stream},
+ openflowplugin-csit-1node-gate-scalability-only-{stream},
+ openflowplugin-csit-1node-periodic-gate-scale-stats-collection-daily-only-{stream},
+ openflowplugin-csit-1node-periodic-gate-bulkomatic-perf-daily-only-{stream},
+ openflowplugin-csit-3node-gate-clustering-only-{stream},
+ openflowplugin-csit-3node-periodic-gate-bulkomatic-clustering-daily-only-{stream},
+ openflowplugin-csit-3node-periodic-gate-bulkomatic-clustering-perf-daily-only-{stream}
- netvirt:
csit-list: >
./autogen.sh
./configure --prefix="$ROOT" \
--with-buildversion=$BUILD_NUMBER \
- CPPFLAGS="-isystem $ROOT/include"
+ CPPFLAGS="-isystem $ROOT/include" \
CXXFLAGS="-Wall"
make -j8
if ! make check; then find . -name test-suite.log -exec cat {} \; && false; fi
set -e
echo "---> Cleaning up OVS $OVS_VERSION"
-docker logs $CID > $WORKSPACE/docker-ovs-${OVS_VERSION}.log
-docker stop $CID
-docker rm $CID
+docker logs "$CID" > "$WORKSPACE/docker-ovs-${OVS_VERSION}.log"
+docker stop "$CID"
+docker rm "$CID"
rm env.properties
docker images
echo "---> Starting OVS $OVS_VERSION"
-/usr/bin/docker pull vpickard/openvswitch:$OVS_VERSION
-CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t vpickard/openvswitch:$OVS_VERSION /usr/bin/supervisord)
-REALCID=`echo $CID | rev | cut -d ' ' -f 1 | rev`
+/usr/bin/docker pull "vpickard/openvswitch:$OVS_VERSION"
+CID=$(/usr/bin/docker run -p 6641:6640 --privileged=true -d -i -t "vpickard/openvswitch:$OVS_VERSION" /usr/bin/supervisord)
+REALCID=$(echo "$CID" | rev | cut -d ' ' -f 1 | rev)
echo "CID=$REALCID" > env.properties
echo "OVS_VERSION=${OVS_VERSION}" >> env.properties
-CONTROLLER_IP=`facter ipaddress`
+CONTROLLER_IP=$(facter ipaddress)
echo "CONTROLLER_IP=${CONTROLLER_IP}" >> env.properties
echo "---> Waiting..."
set -e
echo "---> Configuring OVS for HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl stop ovsdb-server
-/usr/bin/docker exec $CID supervisorctl start ovsdb-server-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-br br-vtep
-/usr/bin/docker exec $CID ovs-vsctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl add-ps br-vtep
-/usr/bin/docker exec $CID vtep-ctl add-port br-vtep eth0
-/usr/bin/docker exec $CID vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
-/usr/bin/docker exec $CID vtep-ctl set-manager ptcp:6640
+/usr/bin/docker exec "$CID" supervisorctl stop ovsdb-server
+/usr/bin/docker exec "$CID" supervisorctl start ovsdb-server-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-br br-vtep
+/usr/bin/docker exec "$CID" ovs-vsctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl add-ps br-vtep
+/usr/bin/docker exec "$CID" vtep-ctl add-port br-vtep eth0
+/usr/bin/docker exec "$CID" vtep-ctl set Physical_Switch br-vtep tunnel_ips=192.168.254.20
+/usr/bin/docker exec "$CID" vtep-ctl set-manager ptcp:6640
sleep 5
echo "---> Starting OVS HW VTEP Emulator"
-/usr/bin/docker exec $CID supervisorctl start ovs-vtep
+/usr/bin/docker exec "$CID" supervisorctl start ovs-vtep
sleep 5
project: 'ovsdb'
# The functionality under test
- functionality: 'clustering'
+ functionality:
+ - 'upstream-clustering'
+ - 'gate-clustering'
# Project branches
stream:
+++ /dev/null
----
-- project:
- name: ovsdb-csit-scalability-daily
- jobs:
- - '{project}-csit-1node-periodic-{functionality}-{install}-{stream}'
-
- # The project name
- project: 'ovsdb'
-
- # The functionality under test
- functionality: 'scalability-daily'
-
- # Project branches
- stream:
- - carbon:
- branch: 'master'
- jre: 'openjdk8'
- - boron:
- branch: 'stable/boron'
- jre: 'openjdk8'
- - beryllium:
- branch: 'stable/beryllium'
- jre: 'openjdk7'
-
- install:
- - only:
- scope: 'only'
-
- # Features to install
- install-features: 'odl-ovsdb-openstack'
-
- # Robot custom options
- robot-options: '-v MIN_SWITCHES:100 -v MAX_SWITCHES:500 -v STEP_SWITCHES:100'
-
- # Plot Info
- 01-plot-title: 'Max number of Switches'
- 01-plot-yaxis: 'OpenFlow Switches'
- 01-plot-group: 'Inventory Scalability'
- 01-plot-data-file: 'switches.csv'
project: 'ovsdb'
# The functionality under test
- functionality: 'southbound'
+ functionality:
+ - 'upstream-southbound'
+ - 'gate-southbound'
# Project branches
stream:
- core:
csit-list: >
- ovsdb-csit-1node-southbound-only-{stream},
- ovsdb-csit-1node-southbound-all-{stream},
- ovsdb-csit-3node-clustering-only-{stream}
+ ovsdb-csit-1node-gate-southbound-only-{stream},
+ ovsdb-csit-1node-gate-southbound-all-{stream},
+ ovsdb-csit-3node-gate-clustering-only-{stream}
gdebi
# Build release specified by build params
-$WORKSPACE/packaging/deb/build.py --major "$VERSION_MAJOR" \
+"$WORKSPACE/packaging/deb/build.py" --major "$VERSION_MAJOR" \
--minor "$VERSION_MINOR" \
--patch "$VERSION_PATCH" \
--deb "$PKG_VERSION" \
# Install required packages
virtualenv rpm_build
+# shellcheck disable=SC1091
source rpm_build/bin/activate
pip install --upgrade pip
-pip install -r $WORKSPACE/packaging/rpm/requirements.txt
+pip install -r "$WORKSPACE/packaging/rpm/requirements.txt"
# Build the latest snapshot matching the given major minor version
-$WORKSPACE/packaging/rpm/build.py --build-latest-snap \
+"$WORKSPACE/packaging/rpm/build.py" --build-latest-snap \
--major "$VERSION_MAJOR" \
--minor "$VERSION_MINOR" \
- --sysd_commit "$SYSD_COMMIT" \
--changelog_name "$CHANGELOG_NAME" \
--changelog_email "$CHANGELOG_EMAIL"
# Install required packages
virtualenv rpm_build
+# shellcheck disable=SC1091
source rpm_build/bin/activate
pip install --upgrade pip
-pip install -r $WORKSPACE/packaging/rpm/requirements.txt
+pip install -r "$WORKSPACE/packaging/rpm/requirements.txt"
+
+# Make a URL for the tarball artifact from DOWNLOAD_URL (a zip)
+# shellcheck disable=SC2154
+download_url="${{DOWNLOAD_URL//zip/tar.gz}}"
# Build release specified by build params
-$WORKSPACE/packaging/rpm/build.py --download_url "$DOWNLOAD_URL" \
- --sysd_commit "$SYSD_COMMIT" \
+"$WORKSPACE/packaging/rpm/build.py" --download_url "$download_url" \
--changelog_date "$CHANGELOG_DATE" \
--changelog_name "$CHANGELOG_NAME" \
--changelog_email "$CHANGELOG_EMAIL"
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail if any command in a pipeline fails, not just the last one
+set -ex -o pipefail
+
+# Install required packages
+virtualenv deb_build
+source deb_build/bin/activate
+pip install --upgrade pip
+
+# Install latest ansible
+sudo apt-add-repository ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install -y ansible
+
+git clone https://github.com/dfarrell07/ansible-opendaylight.git
+cd ansible-opendaylight
+sudo ansible-galaxy install -r requirements.yml
+sudo ansible-playbook -i "localhost," -c local examples/deb_repo_install_playbook.yml
+
+# TODO: Add more tests
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail if any command in a pipeline fails, not just the last one
+set -ex -o pipefail
+
+# Install required packages
+virtualenv rpm_build
+source rpm_build/bin/activate
+pip install --upgrade pip
+sudo yum install -y ansible
+
+git clone https://github.com/dfarrell07/ansible-opendaylight.git
+cd ansible-opendaylight
+sudo ansible-galaxy install -r requirements.yml
+sudo ansible-playbook -i "localhost," -c local examples/odl_6_testing_playbook.yml
+
+# TODO: Add more tests
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail if any command in a pipeline fails, not just the last one
+set -ex -o pipefail
+
+# Install ODL from a .deb link or a PPA repository
+if [[ $URL == *.deb ]]
+then
+ sudo apt-get install -y "$URL"
+elif [[ $URL == ppa:* ]]
+then
+ sudo add-apt-repository "$REPO_FILE"
+ sudo apt-get update
+ sudo apt-get install -y opendaylight
+else
+ echo "URL is not a link to a PPA repo or .deb"
+fi
+
+# Install expect to interact with karaf shell
+sudo apt-get install -y expect
+
+# Start OpenDaylight
+sudo systemctl start opendaylight
+
+# Check status of OpenDaylight
+sudo systemctl status opendaylight
+
+# Get process id of Java
+pgrep java
# https://github.com/dfarrell07/puppet-opendaylight/blob/master/Vagrantfile
# Update Int/Pack's puppet-opendaylight submodule to latest master
-pushd $WORKSPACE/packaging
+pushd "$WORKSPACE/packaging"
git submodule init
git submodule update --remote
gpg2 --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
curl -L get.rvm.io | bash -s stable
# Expected by RVM, seems required to make RVM functions (`rvm use`) available
-source $HOME/.rvm/scripts/rvm
+# shellcheck disable=SC1090
+source "$HOME/.rvm/scripts/rvm"
rvm install 2.4.0
ruby --version
# This has to be done as a login shell to get rvm fns
# Install gems dependencies of puppet-opendaylight via Bundler
gem install bundler
-echo export PATH=\\$PATH:/usr/local/bin >> $HOME/.bashrc
-pushd $WORKSPACE/packaging/puppet/puppet-opendaylight
+echo export PATH="\\$PATH:/usr/local/bin" >> "$HOME/.bashrc"
+pushd "$WORKSPACE/packaging/puppet/puppet-opendaylight"
bundle install
bundle update
- 'packaging-build-rpm-snap-{stream}'
- 'packaging-build-deb-{stream}'
- 'packaging-test-rpm-{stream}'
+ - 'packaging-test-deb-{stream}'
- 'packaging-test-puppet-{stream}'
+ - 'packaging-test-ansible-rpm-{stream}'
+ - 'packaging-test-ansible-deb-{stream}'
project: 'integration/packaging'
- string:
name: DOWNLOAD_URL
# yamllint disable-line rule:line-length
- default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz'
+ default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.2-Boron/distribution-karaf-0.5.2-Boron.tar.gz'
description: 'URL to ODL tarball artifact to repackage into RPM'
- - string:
- name: SYSD_COMMIT
- default: '07f7c83b0ef46ad3809e5be03e09a77fe554eeae'
- description: 'Version of ODL systemd unitfile to download and package in ODL RPM'
- string:
name: CHANGELOG_DATE
default: 'Sun Aug 25 1991'
artifacts: '{archive-artifacts}'
- string:
name: VERSION_MAJOR
- default: '5'
+ default: '6'
description: 'OpenDaylight major (element) version number to build'
- string:
name: VERSION_MINOR
- default: '2'
+ default: '0'
description: 'OpenDaylight minor (SR) version number to build'
- - string:
- name: SYSD_COMMIT
- default: '07f7c83b0ef46ad3809e5be03e09a77fe554eeae'
- description: 'Version of ODL systemd unitfile to download and package in ODL RPM'
- string:
name: CHANGELOG_NAME
default: 'Jenkins'
- string:
name: URL
# yamllint disable-line rule:line-length
- default: 'https://raw.githubusercontent.com/opendaylight/integration-packaging/master/rpm/example_repo_configs/opendaylight-51-release.repo'
+ default: 'https://raw.githubusercontent.com/opendaylight/integration-packaging/master/rpm/example_repo_configs/opendaylight-52-release.repo'
description: 'Link to .repo or .rpm file'
- string:
name: REPO_FILE
- default: '/etc/yum.repos.d/opendaylight-51-release.repo'
+ default: '/etc/yum.repos.d/opendaylight-52-release.repo'
description: 'Name of the .repo file'
scm:
builders:
- shell: !include-raw: include-raw-test-rpm.sh
- - shell: !include-raw: include-raw-test-karaf.sh
+ - shell: !include-raw: include-raw-test-karaf.expect
publishers:
- archive-artifacts:
artifacts: '**'
+- job-template:
+ name: 'packaging-test-deb-{stream}'
+
+ node: ubuntu1604-mininet-ovs-25-2c-4g
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+ - string:
+ name: URL
+ # yamllint disable-line rule:line-length
+ default: 'http://download.opensuse.org/repositories/home:/akshitajha/xUbuntu_16.04/all/opendaylight_5.0.0-1_all.deb'
+ description: 'Link to .deb file'
+ - string:
+ name: REPO_FILE
+ default: 'ppa:odl-team/boron'
+ description: 'Name of the PPA repository'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'packaging'
+ refspec: '$GERRIT_REFSPEC'
+ branch: 'master'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+
+ builders:
+ - shell: !include-raw: include-raw-test-deb.sh
+ - shell: !include-raw: include-raw-test-karaf.expect
+
+ publishers:
+ - archive-artifacts:
+ artifacts: '**'
+
+- job-template:
+ name: 'packaging-test-ansible-rpm-{stream}'
+
+ node: centos7-java-builder-2c-4g
+
+ project-type: freestyle
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: 7
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'packaging'
+ refspec: '$GERRIT_REFSPEC'
+ branch: 'master'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+
+ builders:
+ - shell: !include-raw: include-raw-test-ansible-rpm.sh
+
+ triggers:
+ - timed: '@daily'
+
+
- job-template:
name: 'packaging-build-deb-{stream}'
- node: ubuntu-trusty-mininet-2c-2g
+ node: ubuntu1404-mininet-2c-2g
project-type: freestyle
description: 'OpenDaylight major (element) version number to build'
- string:
name: VERSION_MINOR
- default: '0'
+ default: '2'
description: 'OpenDaylight minor (SR) version number to build'
- string:
name: VERSION_PATCH
description: 'Deb version for the given ODL major.minor.patch'
- string:
name: SYSD_COMMIT
- default: '07f7c83b0ef46ad3809e5be03e09a77fe554eeae'
+ default: 'ba1f409c1a46efd068b16ced6ddc8b32a412b03a'
description: 'Version of ODL systemd unitfile to download and package in ODL .deb'
- string:
name: CODENAME
- default: 'Boron'
+ default: 'Boron-SR2'
description: 'Elemental codename for the ODL release, including SR if applicable'
- string:
name: DOWNLOAD_URL
# yamllint disable-line rule:line-length
- default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz'
+ default: 'https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.5.2-Boron/distribution-karaf-0.5.2-Boron.tar.gz'
description: 'URL to ODL tarball artifact to repackage into .deb'
- string:
name: JAVA_VERSION
publishers:
- archive-artifacts:
artifacts: 'packaging/deb/opendaylight/*.deb'
+
+
+- job-template:
+ name: 'packaging-test-ansible-deb-{stream}'
+
+ node: ubuntu1604-mininet-ovs-25-2c-4g
+
+ project-type: freestyle
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: 7
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+
+ scm:
+ - integration-gerrit-scm:
+ basedir: 'packaging'
+ refspec: '$GERRIT_REFSPEC'
+ branch: 'master'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+
+ builders:
+ - shell: !include-raw: include-raw-test-ansible-deb.sh
+
+ triggers:
+ - timed: '@daily'
email-recipients: 'jenkins@lists.opendaylight.org'
odl-ml2-driver-repo: 'https://github.com/openstack/networking-odl'
networking-l2gw-repo: 'http://git.openstack.org/openstack/networking-l2gw'
+ devstack-kubernetes-plugin-repo: 'https://github.com/openstack/kuryr-kubernetes'
+ devstack-lbaas-plugin-repo: 'https://github.com/openstack/neutron-lbaas'
server-name: 'OpenDaylight'
git-url: 'ssh://jenkins-$SILO@git.opendaylight.org:29418'
+ public-bridge: 'br-physnet1'
+ public-physical-network: 'physnet1'
+ odl-enable-l3: 'yes'
+ disable-odl-l3-service-plugin: 'no'
+ odl-ml2-driver-version: 'v1'
+ odl-ml2-port-binding: 'network-topology'
+ enable-openstack-plugins: 'networking-odl'
+ enable-openstack-services: 'q-svc,q-dhcp,q-meta,n-cauth,tempest'
+ disable-openstack-services: 'swift,cinder,n-net,q-vpn,n-cpu'
+ enable-networking-l2gw: 'no'
+ lbaas-service-provider: ''
+ create-initial-networks: 'False'
+ tenant-network-type: 'vxlan'
+ security-group-mode: 'none'
+
# openstack-infra-parameters defaults
archive-artifacts: ''
branch: master
aaa-csit-1node-authn-only-carbon,
aaa-csit-1node-idmlite-all-carbon,
aaa-csit-1node-idmlite-only-carbon,
+ aaa-csit-1node-keystone-all-carbon,
+ aaa-csit-1node-keystone-only-carbon,
alto-csit-1node-setup-all-carbon,
alto-csit-1node-setup-only-carbon,
armoury-csit-1node-basic-all-carbon,
ocpplugin-csit-1node-scalability-only-carbon,
of-config-csit-1node-basic-all-carbon,
of-config-csit-1node-basic-only-carbon,
- openflowplugin-csit-1node-cbench-performance-only-carbon,
- openflowplugin-csit-1node-config-performance-only-carbon,
openflowplugin-csit-1node-flow-services-all-carbon,
openflowplugin-csit-1node-flow-services-frs-only-carbon,
openflowplugin-csit-1node-flow-services-only-carbon,
- openflowplugin-csit-1node-periodic-bulk-matic-ds-daily-only-carbon,
openflowplugin-csit-1node-periodic-bulkomatic-perf-daily-only-carbon,
openflowplugin-csit-1node-periodic-cbench-daily-only-carbon,
openflowplugin-csit-1node-periodic-link-scalability-daily-only-carbon,
openflowplugin-csit-1node-periodic-restconf-perf-daily-only-carbon,
- openflowplugin-csit-1node-periodic-rpc-time-measure-daily-only-carbon,
openflowplugin-csit-1node-periodic-scale-stats-collection-daily-frs-only-carbon,
openflowplugin-csit-1node-periodic-scale-stats-collection-daily-only-carbon,
openflowplugin-csit-1node-periodic-sw-scalability-daily-only-carbon,
+ openflowplugin-csit-1node-sanity-only-carbon,
openflowplugin-csit-1node-scalability-only-carbon,
openflowplugin-csit-3node-clustering-only-carbon,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-daily-only-carbon,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-carbon,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-carbon,
- ovsdb-csit-1node-periodic-scalability-daily-only-carbon,
- ovsdb-csit-1node-southbound-all-carbon,
- ovsdb-csit-1node-southbound-only-carbon,
- ovsdb-csit-3node-clustering-only-carbon,
+ ovsdb-csit-1node-upstream-southbound-all-carbon,
+ ovsdb-csit-1node-upstream-southbound-only-carbon,
+ ovsdb-csit-3node-upstream-clustering-only-carbon,
packetcable-csit-1node-pcmm-all-carbon,
packetcable-csit-1node-pcmm-only-carbon,
sdninterfaceapp-csit-1node-basic-only-carbon,
sxp-csit-1node-topology-only-carbon,
sxp-csit-3node-periodic-clustering-all-carbon,
sxp-csit-3node-periodic-clustering-only-carbon,
+ sxp-csit-3node-periodic-routing-all-carbon,
+ sxp-csit-3node-periodic-routing-only-carbon,
topoprocessing-csit-1node-topology-operations-all-carbon,
topoprocessing-csit-1node-topology-operations-only-carbon,
tsdr-csit-1node-cassandra-datastore-only-carbon,
+ tsdr-csit-1node-elasticsearch-datastore-only-carbon,
tsdr-csit-1node-hbase-datastore-all-carbon,
tsdr-csit-1node-hbase-datastore-only-carbon,
tsdr-csit-1node-hsqldb-datastore-all-carbon,
vtn-csit-3node-manager-all-carbon,
vtn-csit-3node-manager-only-carbon,
yangtools-csit-1node-periodic-system-only-carbon
+ csit-weekly-list-carbon: >
+ controller-csit-1node-notifications-longevity-only-carbon
csit-list-boron: >
aaa-csit-1node-authn-all-boron,
aaa-csit-1node-authn-only-boron,
ocpplugin-csit-1node-scalability-only-boron,
of-config-csit-1node-basic-all-boron,
of-config-csit-1node-basic-only-boron,
- openflowplugin-csit-1node-cbench-performance-only-boron,
- openflowplugin-csit-1node-config-performance-only-boron,
openflowplugin-csit-1node-flow-services-all-boron,
openflowplugin-csit-1node-flow-services-frs-only-boron,
openflowplugin-csit-1node-flow-services-only-boron,
- openflowplugin-csit-1node-periodic-bulk-matic-ds-daily-only-boron,
openflowplugin-csit-1node-periodic-bulkomatic-perf-daily-only-boron,
openflowplugin-csit-1node-periodic-cbench-daily-only-boron,
openflowplugin-csit-1node-periodic-link-scalability-daily-only-boron,
openflowplugin-csit-1node-periodic-restconf-perf-daily-only-boron,
- openflowplugin-csit-1node-periodic-rpc-time-measure-daily-only-boron,
openflowplugin-csit-1node-periodic-scale-stats-collection-daily-frs-only-boron,
openflowplugin-csit-1node-periodic-scale-stats-collection-daily-only-boron,
openflowplugin-csit-1node-periodic-sw-scalability-daily-only-boron,
+ openflowplugin-csit-1node-sanity-only-boron,
openflowplugin-csit-1node-scalability-only-boron,
openflowplugin-csit-3node-clustering-only-boron,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-daily-only-boron,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-boron,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-boron,
- ovsdb-csit-1node-periodic-scalability-daily-only-boron,
- ovsdb-csit-1node-southbound-all-boron,
- ovsdb-csit-1node-southbound-only-boron,
- ovsdb-csit-3node-clustering-only-boron,
+ ovsdb-csit-1node-upstream-southbound-all-boron,
+ ovsdb-csit-1node-upstream-southbound-only-boron,
+ ovsdb-csit-3node-upstream-clustering-only-boron,
packetcable-csit-1node-pcmm-all-boron,
packetcable-csit-1node-pcmm-only-boron,
sdninterfaceapp-csit-1node-basic-only-boron,
topoprocessing-csit-1node-topology-operations-all-boron,
topoprocessing-csit-1node-topology-operations-only-boron,
tsdr-csit-1node-cassandra-datastore-only-boron,
+ tsdr-csit-1node-elasticsearch-datastore-only-boron,
tsdr-csit-1node-hbase-datastore-all-boron,
tsdr-csit-1node-hbase-datastore-only-boron,
tsdr-csit-1node-hsqldb-datastore-all-boron,
nic-csit-1node-vtnrenderer-only-beryllium,
of-config-csit-1node-basic-all-beryllium,
of-config-csit-1node-basic-only-beryllium,
- openflowplugin-csit-1node-cbench-performance-only-beryllium,
- openflowplugin-csit-1node-config-performance-only-beryllium,
openflowplugin-csit-1node-flow-services-all-beryllium,
openflowplugin-csit-1node-flow-services-only-beryllium,
- openflowplugin-csit-1node-periodic-bulk-matic-ds-daily-only-beryllium,
openflowplugin-csit-1node-periodic-bulkomatic-perf-daily-only-beryllium,
openflowplugin-csit-1node-periodic-cbench-daily-only-beryllium,
openflowplugin-csit-1node-periodic-link-scalability-daily-only-beryllium,
openflowplugin-csit-1node-periodic-restconf-perf-daily-only-beryllium,
- openflowplugin-csit-1node-periodic-rpc-time-measure-daily-only-beryllium,
openflowplugin-csit-1node-periodic-scale-stats-collection-daily-only-beryllium,
openflowplugin-csit-1node-periodic-sw-scalability-daily-only-beryllium,
openflowplugin-csit-1node-scalability-only-beryllium,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-daily-only-beryllium,
openflowplugin-csit-3node-periodic-bulkomatic-clustering-perf-daily-only-beryllium,
openflowplugin-csit-3node-periodic-restconf-clustering-perf-daily-only-beryllium,
- ovsdb-csit-1node-periodic-scalability-daily-only-beryllium,
- ovsdb-csit-1node-southbound-all-beryllium,
- ovsdb-csit-1node-southbound-only-beryllium,
- ovsdb-csit-3node-clustering-only-beryllium,
+ ovsdb-csit-1node-upstream-southbound-all-beryllium,
+ ovsdb-csit-1node-upstream-southbound-only-beryllium,
+ ovsdb-csit-3node-upstream-clustering-only-beryllium,
packetcable-csit-1node-pcmm-all-beryllium,
packetcable-csit-1node-pcmm-only-beryllium,
sdninterfaceapp-csit-1node-basic-only-beryllium,
topoprocessing-csit-1node-topology-operations-all-beryllium,
topoprocessing-csit-1node-topology-operations-only-beryllium,
tsdr-csit-1node-cassandra-datastore-only-beryllium,
+ tsdr-csit-1node-elasticsearch-datastore-only-beryllium,
tsdr-csit-1node-hbase-datastore-all-beryllium,
tsdr-csit-1node-hbase-datastore-only-beryllium,
tsdr-csit-1node-hsqldb-datastore-all-beryllium,
- 'builder-verify-packer'
- 'builder-merge-packer-{platforms}-{templates}'
# OpenStack Related
+ - 'builder-cleanup-old-images'
- 'builder-delete-stale-stacks'
# Automation for docs and jobs
- 'builder-update-csit-tests'
platforms:
- centos
- ubuntu-14.04
+ - ubuntu-16.04
templates:
- devstack
templates: devstack
- platforms: ubuntu-14.04
templates: docker
+ - platforms: ubuntu-16.04
+ templates: java-builder
+ - platforms: ubuntu-16.04
+ templates: robot
+ - platforms: ubuntu-16.04
+ templates: devstack-pre-pip-mitaka
+ - platforms: ubuntu-16.04
+ templates: devstack-pre-pip-newton
+ - platforms: ubuntu-16.04
+ templates: devstack
+ - platforms: ubuntu-16.04
+ templates: docker
+ - platforms: ubuntu-16.04
+ templates: mininet
+ - platforms: ubuntu-16.04
+ templates: mininet-ovs-2.3
+ - platforms: ubuntu-16.04
+ templates: gbp
+ - platforms: ubuntu-14.04
+ templates: mininet-ovs-2.5
+
project-type: freestyle
archive-artifacts: '**/*.log'
virtualenv $WORKSPACE/venv
source $WORKSPACE/venv/bin/activate
pip install --upgrade pip
- pip freeze
pip install jenkins-job-builder
+ pip freeze
jenkins-jobs -l DEBUG test --recursive -o archives/job_output jjb/
gzip archives/job_output/*
- releng-check-unicode
virtualenv $WORKSPACE/venv
source $WORKSPACE/venv/bin/activate
pip install --upgrade pip
- pip freeze
pip install jenkins-job-builder
+ pip freeze
jenkins-jobs --version
jenkins-jobs update --recursive --delete-old --workers 4 jjb/
virtualenv $WORKSPACE/venv
source $WORKSPACE/venv/bin/activate
pip install --upgrade pip
- pip freeze
pip install jenkins-job-builder
+ pip freeze
jenkins-jobs --flush-cache update --recursive --workers 4 jjb/
publishers:
- opendaylight-infra-shiplogs:
maven-version: 'mvn33'
+- job-template:
+ name: 'builder-cleanup-old-images'
+ project-type: freestyle
+ node: centos7-java-builder-2c-4g
+
+ properties:
+ - opendaylight-infra-properties:
+ build-days-to-keep: 7
+
+ parameters:
+ - opendaylight-infra-parameters:
+ project: '{project}'
+ branch: '{branch}'
+ refspec: 'refs/heads/{branch}'
+ artifacts: '{archive-artifacts}'
+
+ scm:
+ - git-scm:
+ branch: '{branch}'
+
+ wrappers:
+ - opendaylight-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ # Listed after to override openstack-infra-wrappers clouds.yaml definition
+ - config-file-provider:
+ files:
+ - file-id: opendaylight-private-clouds-yaml
+ target: '$HOME/.config/openstack/clouds.yaml'
+
+ triggers:
+ # Cleanup images on a weekly schedule
+ - timed: '@weekly'
+
+ builders:
+ - shell: !include-raw-escape: opendaylight-infra-cleanup-old-images.sh
+
+ publishers:
+ - email-notification:
+ email-recipients: '{email-recipients}'
+ email-prefix: '[releng]'
+ - opendaylight-infra-shiplogs:
+ maven-version: 'mvn33'
+
- job-template:
name: 'builder-update-image-list'
project-type: freestyle
jobs-filename: "csit_jobs_beryllium.lst"
- integration-replace-block-text:
starting-regex: "'csit-list-carbon: >'"
- ending-regex: "'csit-list-boron: >'"
+ ending-regex: "'csit-weekly-list-carbon: >'"
file-with-changes-to-insert: "'csit_jobs_carbon.lst'"
file-to-change: "'jjb/releng-defaults.yaml'"
- integration-replace-block-text:
properties:
- build-discarder:
days-to-keep: '{build-days-to-keep}'
+ num-to-keep: 40
+ artifact-days-to-keep: -1
+ artifact-num-to-keep: 5
- parameter:
name: opendaylight-infra-parameters
description: 'Parameter to identify an ODL Gerrit project'
- string:
name: ARCHIVE_ARTIFACTS
- default: '{artifacts} **/target/surefire-reports/*-output.txt **/hs_err_*.log **/target/feature/feature.xml'
+ default: >
+ {artifacts}
+ **/target/surefire-reports/*-output.txt
+ **/hs_err_*.log
+ **/target/feature/feature.xml
+ **/*.hprof
description: 'Space separated glob patterns for artifacts to archive into logs.opendaylight.org'
- string:
name: GERRIT_PROJECT
server-name: '{server}'
trigger-on:
- patchset-created-event:
- exclude-drafts: 'true'
- exclude-trivial-rebase: 'false'
- exclude-no-code-change: 'true'
+ exclude-drafts: true
+ exclude-trivial-rebase: false
+ exclude-no-code-change: false
- draft-published-event
- comment-added-contains-event:
- comment-contains-value: 'recheck'
+ comment-contains-value: recheck
projects:
- - project-compare-type: 'ANT'
+ - project-compare-type: ANT
project-pattern: '{project}'
branches:
- - branch-compare-type: 'ANT'
+ - branch-compare-type: ANT
branch-pattern: '**/{branch}'
file-paths:
- - compare-type: 'ANT'
+ - compare-type: ANT
pattern: '{files}'
# TODO: Unify argument names across gerrit-trigger-* macros.
- postbuildscript:
builders:
- shell: !include-raw: include-raw-stack-delete.sh
- - shell: !include-raw: include-raw-deploy-archives.sh
+ - shell: !include-raw-escape: include-raw-deploy-archives.sh
- maven-target:
maven-version: '{maven-version}'
pom: '.archives/deploy-archives.xml'
cd packer
export PACKER_LOG="yes" && \
export PACKER_LOG_PATH="packer-build.log" && \
- packer.io build -var-file=$CLOUDENV \
+ packer.io build -color=false -var-file=$CLOUDENV \
-var-file=../packer/vars/{platform}.json \
../packer/templates/{template}.json
files:
- file-id: 'jjbini'
target: '$HOME/.config/jenkins_jobs/jenkins_jobs.ini'
+
+- builder:
+ name: distribution-check-wipe
+ # Step zero: Wipe file repositories up front.
+ builders:
+ - shell: |
+ echo "wipe r: the local Maven repository"
+ rm -rfv /tmp/r
+ echo "wipe n: the fake remote (Nexus) repository"
+ rm -rfv /tmp/n
+ echo "wipe t: the transient repository used in some manipulations"
+ rm -rfv /tmp/t
+
+- builder:
+ name: distribution-check-build-project
+ # Step one: Online build of the project, using local repository /tmp/r/ and deploying artifacts to /tmp/n/.
+ # Ordinary SingleFeatureTest failures are detected in the verify job, so we can use "q" profile here.
+ # Arguments:
+ # pom: Relative path to pom file to use. Typically '$GERRIT_PROJECT/pom.xml'.
+ builders:
+ - maven-target:
+ maven-version: 'mvn33'
+ pom: '{pom}'
+ goals: >
+ clean deploy dependency:tree
+ -DoutputFile=dependency_tree.txt
+ -V -B -Pq
+ -Djenkins
+ -DaltDeploymentRepository=fake-nexus::default::file:///tmp/n/
+ -Dmaven.repo.local=/tmp/r
+ -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
+ java-opts:
+ - '-Xmx4096m -XX:MaxPermSize=1024m -Dmaven.compile.fork=true'
+ settings: 'integration-settings'
+ settings-type: cfp
+ global-settings: 'odl-global-settings'
+ global-settings-type: cfp
+
+- builder:
+ name: distribution-check-verify-groupid
+ # Step two: Verify all deployed artifacts belong to the project's groupId.
+ # This is done by moving the allowed directories out of /tmp/n and checking no files remained there.
+ # The correct directory is derived from $GERRIT_PROJECT.
+ # TODO: Verify all deployed artifacts are snapshots.
+ # Arguments:
+ # gerrit-project: Project name as nexus URI part. Typically '$GERRIT_PROJECT'.
+ builders:
+ - shell: |
+ mkdir -p /tmp/t/org/opendaylight/{gerrit-project}
+ mv /tmp/n/org/opendaylight/{gerrit-project}/* /tmp/t/org/opendaylight/{gerrit-project}/
+ test -z "`find /tmp/n/ -type f`" || ( echo "ERROR: Mismatched groupId detected (see above)." && false )
+ rm -rf /tmp/n
+ mv /tmp/t /tmp/n
+
+- builder:
+ name: distribution-check-download-deps
+ # Step three: Online build of integration distribution.
+ # This step is mainly used for downloading other project artifacts.
+ # Running SingleFeaturesTest here does not seem to be required, so -Pq is used again.
+ # Arguments:
+ # dist-pom: Relative path to pom file to use. 'distribution/pom.xml' is recommended.
+ builders:
+ - maven-target:
+ maven-version: 'mvn33'
+ pom: '{dist-pom}'
+ goals: >
+ clean install dependency:tree
+ -DoutputFile=dependency_tree.txt
+ -V -B -Pq
+ -Djenkins
+ -Dmaven.repo.local=/tmp/r
+ -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
+ java-opts:
+ - '-Xmx1024m -XX:MaxPermSize=256m -Dmaven.compile.fork=true'
+ settings: 'integration-settings'
+ settings-type: cfp
+ global-settings: 'odl-global-settings'
+ global-settings-type: cfp
+
+- builder:
+ name: distribution-check-delete-snapshots
+ # Step four: Delete snapshot artifacts from the local repository.
+ # This is critical to detect orphaned artifacts or missing project-internal dependency declarations.
+ # Also other files related to maven repository resolution are removed,
+ # and then empty directories are removed, in order to simplify debugging.
+ builders:
+ - shell: !include-raw-escape: integration-distribution-delete-snaphot-artifacts.sh
+
+- builder:
+ name: distribution-check-configure-remotes
+ # Now the ugly part. It seems that the only way to tell Maven 2+
+ # which remote repositories to use is via settings.xml file.
+ # So we create such a file here, but it needs most of odlparent:settings.xml
+ builders:
+ - shell: |
+ echo '
+ <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+ https://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <profiles>
+ <profile>
+ <id>opendaylight-release</id>
+ <repositories>
+ <repository>
+ <id>opendaylight-mirror</id>
+ <name>opendaylight</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases><updatePolicy>never</updatePolicy></releases>
+ <snapshots><enabled>false</enabled></snapshots>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>opendaylight-plugin-mirror</id>
+ <name>opendaylight-plugin</name>
+ <url>https://nexus.opendaylight.org/content/repositories/public/</url>
+ <releases><updatePolicy>never</updatePolicy></releases>
+ <snapshots><enabled>false</enabled></snapshots>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ <profile>
+ <id>file-snapshots</id>
+ <repositories>
+ <repository>
+ <id>file-snapshots</id>
+ <name>file</name>
+ <url>file:///tmp/n/</url>
+ <releases><enabled>false</enabled></releases>
+ </repository>
+ </repositories>
+ <pluginRepositories>
+ <pluginRepository>
+ <id>file-plugin-snapshots</id>
+ <name>file-plugin</name>
+ <url>file:///tmp/n/</url>
+ <releases><enabled>false</enabled></releases>
+ </pluginRepository>
+ </pluginRepositories>
+ </profile>
+ </profiles>
+ <activeProfiles>
+ <activeProfile>file-snapshots</activeProfile>
+ <activeProfile>opendaylight-release</activeProfile>
+ </activeProfiles>
+ </settings>
+ ' > fake_remotes.xml
+ # Notes: The settings are minimal in order to detect breakage scenarios,
+ # while allowing for the following quirks:
+ # * Some plugins seem to have hardcoded repos, for example check-license looks at repository.apache.org
+ # * Some plugin artifacts (related to surefire) are not downloaded when tests are skipped.
+ # * populate-local-repo looks at oss.sonatype.org and does not store things (like guava) to /tmp/r
+
+- builder:
+ name: distribution-check-repeat-build
+ # Step five: Repeat the distribution build but with the new settings.
+ # Here, only the project snapshot artifacts deployed to /tmp/n are available,
+ # which faithfully reproduces conditions in later verify-like job runs.
+ # We cannot use --offline, because: "Cannot access file (file:///tmp/n) in offline mode"
+ # This is where SingleFeatureTest is not skipped.
+ # Arguments:
+ # dist-pom: Relative path to pom file to use. 'distribution/pom.xml' is recommended.
+ builders:
+ - maven-target:
+ maven-version: 'mvn33'
+ pom: '{dist-pom}'
+ goals: >
+ clean install dependency:tree
+ -DoutputFile=dependency_tree.txt -s fake_remotes.xml
+ -V -B -Pq
+ -DskipTests=false
+ -Djenkins
+ -Dmaven.repo.local=/tmp/r
+ -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
+ java-opts:
+ - '-Xmx1024m -XX:MaxPermSize=256m -Dmaven.compile.fork=true'
files: '**'
builders:
- # Step zero: Wipe file repositories up front.
- - shell: |
- echo "wipe r: the local Maven repository"
- rm -rfv /tmp/r
- echo "wipe n: the fake remote (Nexus) repository"
- rm -rfv /tmp/n
- echo "wipe t: the transient repository used in some manipulations"
- rm -rfv /tmp/t
- # Step one: Online build of the project, using local repository /tmp/r/ and deploying artifacts to /tmp/n/.
- # Ordinary SingleFeatureTest failures are detected in the verify job, so we can use "q" profile here.
- - maven-target:
- maven-version: 'mvn33'
+ - distribution-check-wipe
+ - distribution-check-build-project:
pom: '$GERRIT_PROJECT/pom.xml'
- goals: >
- clean deploy dependency:tree
- -DoutputFile=dependency_tree.txt
- -V -B -Pq
- -Djenkins
- -Dstream={stream}
- -DaltDeploymentRepository=fake-nexus::default::file:///tmp/n/
- -Dmaven.repo.local=/tmp/r
- -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
- java-opts:
- - '-Xmx4096m -XX:MaxPermSize=1024m -Dmaven.compile.fork=true'
- settings: '{mvn-settings}'
- settings-type: cfp
- global-settings: 'odl-global-settings'
- global-settings-type: cfp
- # Step two: Verify all deployed artifacts belong to the project's groupId.
- # This is done by moving the allowed directories out of /tmp/n and checking no files remained there.
- # The correct directory is derived from $GERRIT_PROJECT.
- - shell: |
- mkdir -p /tmp/t/org/opendaylight/$GERRIT_PROJECT
- mv /tmp/n/org/opendaylight/$GERRIT_PROJECT/* /tmp/t/org/opendaylight/$GERRIT_PROJECT/
- test `find /tmp/n/ -type f | wc -l` -eq 0
- rm -rf /tmp/n
- mv /tmp/t /tmp/n
- # Step three: Online build of integration distribution.
- # This step is mainly used for downloading other project artifacts.
- # Running SingleFeaturesTest here does not seem to be required, so -Pq is used again.
- - maven-target:
- maven-version: 'mvn33'
- pom: 'distribution/pom.xml'
- goals: >
- clean install dependency:tree
- -DoutputFile=dependency_tree.txt
- -V -B -Pq
- -Djenkins
- -Dstream={stream}
- -Dmaven.repo.local=/tmp/r
- -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r
- java-opts:
- - '-Xmx1024m -XX:MaxPermSize=256m -Dmaven.compile.fork=true'
- settings: '{mvn-settings}'
- settings-type: cfp
- global-settings: 'odl-global-settings'
- global-settings-type: cfp
- # Step four: Delete snapshot artifacts from the local repository.
- # This is critical to detect orphaned artifacts or missing project-internal dependency declarations.
- # Also other files related to maven repository resolution are removed,
- # and then empty directories are removed, in order to simplify debugging.
- - shell: !include-raw-escape: integration-distribution-delete-snaphot-artifacts.sh
- # Now the ugly part. It seems that the only way to tell Maven 2+
- # which remote repositories to use is via settings.xml file.
- # So we create such a file here, but it needs most of odlparent:settings.xml
- - shell: |
- echo '
- <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
- https://maven.apache.org/xsd/settings-1.0.0.xsd">
- <profiles>
- <profile>
- <id>opendaylight-release</id>
- <repositories>
- <repository>
- <id>opendaylight-mirror</id>
- <name>opendaylight</name>
- <url>https://nexus.opendaylight.org/content/repositories/public/</url>
- <releases><updatePolicy>never</updatePolicy></releases>
- <snapshots><enabled>false</enabled></snapshots>
- </repository>
- </repositories>
- <pluginRepositories>
- <pluginRepository>
- <id>opendaylight-plugin-mirror</id>
- <name>opendaylight-plugin</name>
- <url>https://nexus.opendaylight.org/content/repositories/public/</url>
- <releases><updatePolicy>never</updatePolicy></releases>
- <snapshots><enabled>false</enabled></snapshots>
- </pluginRepository>
- </pluginRepositories>
- </profile>
- <profile>
- <id>file-snapshots</id>
- <repositories>
- <repository>
- <id>file-snapshots</id>
- <name>file</name>
- <url>file:///tmp/n/</url>
- <releases><enabled>false</enabled></releases>
- </repository>
- </repositories>
- <pluginRepositories>
- <pluginRepository>
- <id>file-plugin-snapshots</id>
- <name>file-plugin</name>
- <url>file:///tmp/n/</url>
- <releases><enabled>false</enabled></releases>
- </pluginRepository>
- </pluginRepositories>
- </profile>
- </profiles>
- <activeProfiles>
- <activeProfile>file-snapshots</activeProfile>
- <activeProfile>opendaylight-release</activeProfile>
- </activeProfiles>
- </settings>
- ' > fake_remotes.xml
- # # Notes: The settings are minimal in order to detect breakage scenarios while allowing for the following quirks:
- # # * Some plugins seem to have hardcoded repos, for example check-license looks at repository.apache.org
- # # * Some plugin artifacts (related to surefire) are not downloaded when tests are skipped.
- # # * populate-local-repo looks at oss.sonatype.org and does not store things (like guava) to /tmp/r
- # Step five: Repeat the distribution build but with the new settings.
- # Here, only the project snapshot artifacts deployed to /tmp/n are available,
- # which faithfully reproduces conditions in later verify-like job runs.
- # We cannot use --offline, because: "Cannot access file (file:///tmp/n) in offline mode"
- # This is where SingleFeatureTest is not skipped.
- - maven-target:
- maven-version: 'mvn33'
- pom: 'distribution/pom.xml'
- goals: >
- clean install dependency:tree
- -DoutputFile=dependency_tree.txt -s fake_remotes.xml
- -V -B -Pq
- -DskipTests=false
- -Djenkins
- -Dstream={stream}
- -Dmaven.repo.local=/tmp/r
- -Dorg.ops4j.pax.url.mvn.localRepository=/tmp/r'
- java-opts:
- - '-Xmx1024m -XX:MaxPermSize=256m -Dmaven.compile.fork=true'
- # Step six: Run Karaf and verify no critical failures are present.
+ - distribution-check-verify-groupid:
+ gerrit-project: '$GERRIT_PROJECT'
+ - distribution-check-download-deps:
+ dist-pom: 'distribution/pom.xml'
+ - distribution-check-delete-snapshots
+ - distribution-check-configure-remotes
+ - distribution-check-repeat-build:
+ dist-pom: 'distribution/pom.xml'
- integration-distribution-check
publishers:
name: tox_builder_verify
# Required Variables:
- # stream: release stream (eg. boron or carbon)
# branch: git branch (eg. stable/boron or master)
# toxdir: directory containing the project's tox.ini relative to
# the workspace. Empty works if tox.ini is at project root.
+ # Note that stream value is not needed.
project-type: freestyle
node: '{build-node}'
<<: *tox_builder_verify
+# Specification for projects outside release cycle, to avoid referring a stream.
+# This is useful mainly for integration-test-jobs, which defines streams for Integration/Distribution,
+# but the tox job is related to Integration/Test only.
+
+- job-template:
+ name: '{project-name}-verify-tox-master'
+
+ # Job template for verify jobs executing tox envlist
+ #
+ # The purpose of this job template is to run tox for projects using this
+ # template.
+ #
+ # Required Variables:
+ # toxdir: directory containing the project's tox.ini relative to
+ # the workspace. Empty works if tox.ini is at project root.
+
+ branch: master
+
+ <<: *tox_builder_verify
+
# Python (pep8) specific, project should migrate to {project-name}-verify-tox-{stream}
- job-template:
-jenkins-job-builder==1.6.1
+jenkins-job-builder==1.6.2
--- /dev/null
+---
+- project:
+ name: sfc-patch-test
+ jobs:
+ - '{project}-patch-test-{feature}-{stream}'
+
+ # The project name
+ project: 'sfc'
+
+ # Project branches
+ stream:
+ - carbon:
+ branch: 'master'
+ jdk: 'openjdk8'
+
+ feature:
+ - basic:
+ csit-list: >
+ sfc-csit-3node-rest-basic-all-{stream}
+
+ - full:
+ csit-list: >
+ sfc-csit-3node-docker-full-deploy-all-{stream}
echo "---> Restart spectrometer-api"
pkill gunicorn
sleep 5 # Sleep 5 seconds for cooldown
+ spectrometer server sync
nohup gunicorn -b 0.0.0.0:5000 'spectrometer:run_app()' --timeout 600 -k gevent >> ~/gunicorn.out &
sleep 120 # Sleep 2 minutes to allow Spectrometer to initialize
--- /dev/null
+---
+- project:
+ name: sxp-csit-routing
+ jobs:
+ - '{project}-csit-3node-periodic-{functionality}-{install}-{stream}'
+
+ # The project name
+ project: 'sxp'
+
+ # The functionality under test
+ functionality: 'routing'
+
+ # Project branches
+ stream:
+ - carbon:
+ branch: 'master'
+ jre: 'openjdk8'
+
+ install:
+ - only:
+ scope: 'only'
+ - all:
+ scope: 'all'
+
+ # Features to install
+ install-features: 'odl-restconf,odl-sxp-controller,odl-sxp-route'
+
+ # Tools VMs:
+ tools_system_count: 2
+
+ # Robot custom options:
+ robot-options: ''
--- /dev/null
+---
+- project:
+ name: tsdr-csit-elasticsearch
+ jobs:
+ - '{project}-csit-1node-{functionality}-{install}-{stream}'
+ - '{project}-csit-verify-1node-{functionality}'
+
+ # The project name
+ project: 'tsdr'
+
+ # The functionality under test to verify
+ functionality: 'elasticsearch-datastore'
+
+ # Project branches
+ stream:
+ - carbon:
+ branch: 'master'
+ jre: 'openjdk8'
+
+ install:
+ - only:
+ scope: 'only'
+
+ # Features to install
+ install-features: >
+ odl-restconf,
+ odl-tsdr-elasticsearch,
+ odl-tsdr-openflow-statistics-collector,
+ odl-tsdr-netflow-statistics-collector,
+ odl-tsdr-syslog-collector
+
+ # Robot custom options
+ robot-options: ''
- mitaka:
openstack-branch: 'stable/mitaka'
odl-ml2-branch: 'stable/mitaka'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170210-1356'
+ openstack_system_image: 'CentOS 7 - devstack-mitaka - 20170314-2255'
- newton:
openstack-branch: 'stable/newton'
odl-ml2-branch: 'stable/newton'
- odl-ml2-driver-version: 'v1'
- openstack_system_image: 'CentOS 7 - devstack-newton - 20170210-1344'
+ openstack_system_image: 'CentOS 7 - devstack-newton - 20170314-2256'
schedule: ''
disable-openstack-services: 'n-net,q-l3'
tenant-network-type: 'local'
-
- public-physical-network: 'physnet1'
-
- enable-networking-l2gw: 'no'
-
- disable-odl-l3-service-plugin: 'no'
-
- security-group-mode: 'none'
-
- robot-options: ''
+---
heat_template_version: 2016-04-08
parameters:
vm_0_group:
type: "OS::Heat::ResourceGroup"
properties:
- count: { get_param: vm_0_count }
+ count: {get_param: vm_0_count}
resource_def:
type: generic-server.yaml
properties:
- job_name: { get_param: job_name }
- silo: { get_param: silo }
- ssh_key: { get_param: silo }
+ job_name: {get_param: job_name}
+ silo: {get_param: silo}
+ ssh_key: {get_param: silo}
index: "%index%"
- vm_flavor: { get_param: vm_0_flavor }
- vm_image: { get_param: vm_0_image }
+ vm_flavor: {get_param: vm_0_flavor}
+ vm_image: {get_param: vm_0_image}
vm_1_group:
type: "OS::Heat::ResourceGroup"
properties:
- count: { get_param: vm_1_count }
+ count: {get_param: vm_1_count}
resource_def:
type: generic-server.yaml
properties:
- job_name: { get_param: job_name }
- silo: { get_param: silo }
- ssh_key: { get_param: silo }
+ job_name: {get_param: job_name}
+ silo: {get_param: silo}
+ ssh_key: {get_param: silo}
index: "%index%"
- vm_flavor: { get_param: vm_1_flavor }
- vm_image: { get_param: vm_1_image }
+ vm_flavor: {get_param: vm_1_flavor}
+ vm_image: {get_param: vm_1_image}
outputs:
vm_0_ips:
description: IP addresses of the 1st vm types
- value: { get_attr: [vm_0_group, ip] }
+ value: {get_attr: [vm_0_group, ip]}
vm_1_ips:
description: IP addresses of the 2nd vm types
- value: { get_attr: [vm_1_group, ip] }
+ value: {get_attr: [vm_1_group, ip]}
+---
heat_template_version: 2016-04-08
parameters:
vm_0_group:
type: "OS::Heat::ResourceGroup"
properties:
- count: { get_param: vm_0_count }
+ count: {get_param: vm_0_count}
resource_def:
type: generic-server.yaml
properties:
- job_name: { get_param: job_name }
- silo: { get_param: silo }
- ssh_key: { get_param: silo }
+ job_name: {get_param: job_name}
+ silo: {get_param: silo}
+ ssh_key: {get_param: silo}
index: "%index%"
- vm_flavor: { get_param: vm_0_flavor }
- vm_image: { get_param: vm_0_image }
+ vm_flavor: {get_param: vm_0_flavor}
+ vm_image: {get_param: vm_0_image}
vm_1_group:
type: "OS::Heat::ResourceGroup"
properties:
- count: { get_param: vm_1_count }
+ count: {get_param: vm_1_count}
resource_def:
type: generic-server.yaml
properties:
- job_name: { get_param: job_name }
- silo: { get_param: silo }
- ssh_key: { get_param: silo }
+ job_name: {get_param: job_name}
+ silo: {get_param: silo}
+ ssh_key: {get_param: silo}
index: "%index%"
- vm_flavor: { get_param: vm_1_flavor }
- vm_image: { get_param: vm_1_image }
+ vm_flavor: {get_param: vm_1_flavor}
+ vm_image: {get_param: vm_1_image}
vm_2_group:
type: "OS::Heat::ResourceGroup"
properties:
- count: { get_param: vm_2_count }
+ count: {get_param: vm_2_count}
resource_def:
type: generic-server.yaml
properties:
- job_name: { get_param: job_name }
- silo: { get_param: silo }
- ssh_key: { get_param: silo }
+ job_name: {get_param: job_name}
+ silo: {get_param: silo}
+ ssh_key: {get_param: silo}
index: "%index%"
- vm_flavor: { get_param: vm_2_flavor }
- vm_image: { get_param: vm_2_image }
+ vm_flavor: {get_param: vm_2_flavor}
+ vm_image: {get_param: vm_2_image}
outputs:
vm_0_ips:
description: IP addresses of the 1st vm types
- value: { get_attr: [vm_0_group, ip] }
+ value: {get_attr: [vm_0_group, ip]}
vm_1_ips:
description: IP addresses of the 2nd vm types
- value: { get_attr: [vm_1_group, ip] }
+ value: {get_attr: [vm_1_group, ip]}
vm_2_ips:
description: IP addresses of the 3rd vm types
- value: { get_attr: [vm_2_group, ip] }
+ value: {get_attr: [vm_2_group, ip]}
+---
heat_template_version: 2016-04-08
parameters:
instance:
type: "OS::Nova::Server"
properties:
- flavor: { get_param: vm_flavor }
- image: { get_param: vm_image }
+ flavor: {get_param: vm_flavor}
+ image: {get_param: vm_image}
name:
str_replace:
template: SILO-JOB_NAME-VM_TYPE-INDEX
params:
- "SILO": { get_param: silo }
- "JOB_NAME": { get_param: job_name }
+ "SILO": {get_param: silo}
+ "JOB_NAME": {get_param: job_name}
"VM_TYPE":
str_split:
- ' - '
- - { get_param: vm_image }
+ - {get_param: vm_image}
- 1
- "INDEX": { get_param: index }
+ "INDEX": {get_param: index}
networks:
- network: RC-ODL
- key_name: { get_param: ssh_key }
+ key_name: {get_param: ssh_key}
user_data: |
#!/bin/bash
- until ping -c1 git.opendaylight.org &>/dev/null; do echo "Waiting until git.opendaylight.org is resolvable..."; done
+ until ping -c1 git.opendaylight.org &>/dev/null
+ do
+ echo "Waiting until git.opendaylight.org is resolvable..."
+ done
git clone https://git.opendaylight.org/gerrit/releng/builder /builder
/builder/jenkins-scripts/jenkins-init-script.sh
outputs:
ip:
description: IP address of the instance
- value: { get_attr: [instance, networks, RC-ODL, 0] }
+ value: {get_attr: [instance, networks, RC-ODL, 0]}
# force any errors to cause the script and job to end in failure
set -xeu -o pipefail
+ensure_kernel_install() {
+ # Workaround for mkinitrd failing on occasion.
+ # On CentOS 7 it seems like the kernel install can fail its mkinitrd
+ # run quietly, so we may not notice the failure. This script retries a
+ # few times before giving up.
+ initramfs_ver=$(rpm -q kernel | tail -1 | sed "s/kernel-/initramfs-/")
+ grub_conf="/boot/grub/grub.conf"
+ # Public cloud does not use /boot/grub/grub.conf and uses grub2 instead.
+ if [ ! -e "$grub_conf" ]; then
+ echo "$grub_conf not found. Using Grub 2 conf instead."
+ grub_conf="/boot/grub2/grub.cfg"
+ fi
+
+ for i in $(seq 3); do
+ if grep "$initramfs_ver" "$grub_conf"; then
+ break
+ fi
+ echo "Kernel initrd missing. Retrying to install kernel..."
+ yum reinstall -y kernel
+ done
+ if ! grep "$initramfs_ver" "$grub_conf"; then
+ cat /boot/grub/grub.conf
+ echo "ERROR: Failed to install kernel."
+ exit 1
+ fi
+}
+
rh_systems() {
# Handle the occurance where SELINUX is actually disabled
SELINUX=$(grep -E '^SELINUX=(disabled|permissive|enforcing)$' /etc/selinux/config)
echo "---> Updating operating system"
yum clean all
yum install -y deltarpm
-
- # Workaround for kernel panic issue that appears sometimes after kernel update
- # https://www.centos.org/forums/viewtopic.php?t=22425
- yum remove -y kernel
yum update -y
- yum install -y kernel
+
+ ensure_kernel_install
# add in components we need or want on systems
echo "---> Installing base packages"
fi
;;
RedHat|CentOS)
- if [ "$(echo $FACTER_OSVER | cut -d'.' -f1)" -ge "7" ]
+ if [ "$(echo "$FACTER_OSVER" | cut -d'.' -f1)" -ge "7" ]
then
echo "---> not modifying java alternatives as OpenJDK 1.7.0 does not exist"
else
EOF
# Add hostname to /etc/hosts to fix 'unable to resolve host' issue with sudo
- sed -i "/127.0.0.1/s/$/ `hostname`/" /etc/hosts
+ sed -i "/127.0.0.1/s/$/ $(hostname)/" /etc/hosts
echo "---> Updating operating system"
# Use retry loop to install packages for failing mirrors
for i in {0..5}
do
+ echo "Attempt $i of installing base packages..."
apt-get clean
apt-get update -m
apt-get upgrade -m
for pkg in unzip xz-utils puppet git git-review libxml-xpath-perl
do
+ # shellcheck disable=SC2046
if [ $(dpkg-query -W -f='${Status}' $pkg 2>/dev/null | grep -c "ok installed") -eq 0 ]; then
- apt-get install $pkg;
+ apt-cache policy $pkg
+ apt-get install $pkg
fi
done
done
pip install -c requirements/upper-constraints.txt -r ${proj}/test-requirements.txt
done
-echo '---> Installing openvswitch from openstack repo'
-# the newton release has ovs 2.5.0
-yum install -y http://rdoproject.org/repos/openstack-newton/rdo-release-newton.rpm
+if [ "$branch" == "stable/mitaka" ] || [ "$branch" == "stable/liberty" ]; then
+ # the newton release has ovs 2.5.0
+ echo '---> Installing openvswitch from openstack Newton repo (2.5.0)'
+ yum install -y http://rdoproject.org/repos/openstack-newton/rdo-release-newton.rpm
+else
+ # the ocata release has ovs 2.6.1
+ echo '---> Installing openvswitch from openstack Ocata repo (2.6.1)'
+ yum install -y http://rdoproject.org/repos/openstack-ocata/rdo-release-ocata.rpm
+fi
+
yum install -y --nogpgcheck openvswitch
cd $OLDPWD
# vim: sw=4 ts=4 sts=4 et tw=72 :
-# force any errors to cause the script and job to end in failure
+# Force any errors to cause the script and job to end in failure
set -xeu -o pipefail
# The following packages are not needed by all projects, but they are
{readline,unixODBC}-devel yum-utils fedora-packager \
libxslt-devel crudini
-#Install python3 and dependencies
+# Install python3 and dependencies, needed for Coala linting at least
yum install -y python34
yum install -y python34-{devel,virtualenv,setuptools,pip}
-# Install python dependencies
+# Install python dependencies, useful generally
yum install -y python-{devel,virtualenv,setuptools,pip}
# Needed by autorelease scripts
yum install -y xmlstarlet
-# sshpass for the current deploy test to be runable immediatelly after
-# build
+# Needed by docs project
+yum install -y graphviz
+
+# Needed by deploy test
yum install -y sshpass
# tcpmd5 is wanting to do 32bit ARM cross-compilation and is specifically
# x86_64 packages for them
yum install -y glibc-devel.i686 kernel-headers
-# The following is needed by opendove, if this is to be perfomed against
-# an EL6 system some of these packages are not availalble (or at the
-# wrong version) in publically available repositories as such this
-# should only really be done on an EL7 (or F18+) system
+# Needed by opendove
yum install -y {jansson,libevent,libnl,libuuid}-devel
-#The following is needed for the vsemprovider build in vtn project.
-#these packages will enable C# compilation.
+# Needed for vsemprovider build in vtn project to enable C# compilation.
rpm --import "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF"
-#Added the mono tools repository
+# Add the mono tools repository
yum-config-manager -y --add-repo http://download.mono-project.com/repo/centos/
-#install the mono toolchain and nuget
+# Install the mono toolchain and nuget
yum -y install mono-complete nuget
-#end changes for vsemprovider in VTN
-
-# TSDR dependencies
+# Needed by TSDR
echo "Installing the Hbase Server..."
mkdir /tmp/Hbase
cd /tmp/Hbase
wget --no-verbose http://apache.osuosl.org/hbase/hbase-0.94.27/hbase-0.94.27.tar.gz
tar -xvf hbase-0.94.27.tar.gz
+# Needed by TSDR
echo "Installing the Cassandra Server..."
mkdir /tmp/cassandra
cd /tmp/cassandra
wget --no-verbose http://apache.osuosl.org/cassandra/2.1.16/apache-cassandra-2.1.16-bin.tar.gz
tar -xvf apache-cassandra-2.1.16-bin.tar.gz
-
# Generally useful for all projects
echo "Installing the Elasticsearch node..."
mkdir /tmp/elasticsearch
wget --no-verbose https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.5.tar.gz
tar -xvzf elasticsearch-1.7.5.tar.gz
-
-# The following installs hashicorp's packer binary which is required for
-# the {verify,merge}-packer jobs
+# Installs Hashicorp's Packer binary, required for {verify,merge}-packer jobs
mkdir /tmp/packer
cd /tmp/packer
wget https://releases.hashicorp.com/packer/0.12.2/packer_0.12.2_linux_amd64.zip
unzip packer_0.12.2_linux_amd64.zip -d /usr/local/bin/
# rename packer to avoid conflict with binary in cracklib
mv /usr/local/bin/packer /usr/local/bin/packer.io
+
-#!/bin/bash
+#!/bin/bash -x
# vim: sw=4 ts=4 sts=4 et tw=72 :
echo 'PS1="[\u@\h \W]> "' >> /etc/skel/.bashrc
echo '---> Install OpenVSwitch 2.5.0'
-add-apt-repository -y ppa:sgauthier/openvswitch-dpdk
-apt-get update -y --force-yes
-apt-get install -y --force-yes openvswitch-switch openvswitch-vtep
-
-echo '---> Installing mininet 2.2.1'
-git clone git://github.com/mininet/mininet
-cd mininet
-git checkout -b 2.2.1 2.2.1
-cd ..
-mininet/util/install.sh -nf
-
-echo '---> Installing MT-Cbench'
-apt-get install -y --force-yes build-essential snmp libsnmp-dev snmpd libpcap-dev \
+apt-get update -m
+apt-get install openvswitch-switch openvswitch-vtep
+
+echo '---> Installing mininet'
+apt-get install mininet
+
+echo '---> Installing build pre-requisites'
+apt-get install build-essential snmp libsnmp-dev snmpd libpcap-dev \
autoconf make automake libtool libconfig-dev libssl-dev libffi-dev libssl-doc pkg-config
+
git clone https://github.com/intracom-telecom-sdn/mtcbench.git
-mtcbench/build_mtcbench.sh
-cp mtcbench/oflops/cbench/cbench /usr/local/bin/
+mtcbench/deploy/docker/provision.sh
+# TODO: remove workaround for build issue with mtcbench
+# when mtcbench dependencies build correctly
+# https://github.com/intracom-telecom-sdn/mtcbench/issues/10
+mtcbench/build_mtcbench.sh || true
+cd mtcbench/oflops/cbench
+make
+cp cbench /usr/local/bin/
echo '---> Installing exabgp'
-apt-get install -y --force-yes exabgp
+apt-get install exabgp
echo '---> All Python package installation should happen in virtualenv'
-apt-get install -y --force-yes python-virtualenv python-pip
+apt-get install python-virtualenv python-pip
# Install vlan for vlan based tests in VTN suites
-apt-get install -y --force-yes -qq vlan
+apt-get install vlan
# Install netaddr package which is needed by some custom mininet topologies
-apt-get install -y --force-yes -qq python-netaddr
+apt-get install python-netaddr
# Install minimal python requirements to get virtualenv going
# Additional python dependencies should be installed via JJB configuration
# inside project jobs using a virtualenv setup.
-yum install -y python-{devel,setuptools,virtualenv} @development
+yum install -y @development \
+ python-devel \
+ python-setuptools \
+ python-virtualenv
+
+# TODO: Move docker-py and netaddr to virtualenv in the csit jobs.
+yum install -y python-docker-py \
+ python-netaddr
# Install dependencies for robotframework and robotframework-sshlibrary
# installed elsewhere
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"provisioners": [
{
"type": "shell",
- "inline": ["mkdir -p /tmp/packer"]
+ "inline": [
+ "mkdir -p /tmp/packer"
+ ]
},
{
"type": "file",
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
"public_tenant": null,
"public_user": null,
"public_pass": null,
-
"private_base_image": null,
"private_network": null,
"private_cloud_user": null,
"private_tenant": null,
"private_user": null,
"private_pass": null,
-
"distro": null,
"cloud_user_data": null
},
{
"public_base_image": "CentOS 7 (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "CentOS 7 (cloudimg 1510)",
"private_cloud_user": "centos",
-
"distro": "CentOS 7",
"cloud_user_data": "provision/rh-user_data.sh"
}
{
"public_auth_url": "https://identity.api.rackspacecloud.com/v2.0/",
-
"public_tenant": "TENNANTID",
"public_user": "USERID",
"public_pass": "USERPASS",
"public_network": "cac67a72-aefc-48f8-ae55-9affa3540dd0",
-
"private_auth_url": "https://privapi.opendaylight.org:5000/v2.0",
-
"private_tenant": "TENNANTID",
"private_user": "USERID",
"private_pass": "USERPASS",
{
"public_base_image": "Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "Ubuntu 14.04 LTS Trusty Tahr (cloudimg)",
"private_cloud_user": "ubuntu",
-
"distro": "Ubuntu 14.04",
"cloud_user_data": "provision/null_data.sh"
}
{
"public_base_image": "Ubuntu 16.04 LTS (Xenial Xerus) (PVHVM)",
"public_cloud_user": "root",
-
"private_base_image": "Ubuntu 16.04 LTS (2016-05-03 cloudimg)",
"private_cloud_user": "ubuntu",
-
"distro": "Ubuntu 16.04",
"cloud_user_data": "provision/null_data.sh"
}
# Thanh Ha (The Linux Foundation) - Initial implementation
##############################################################################
-directory="."
-if [ ! -z "$1" ]; then
- directory="$1"
-fi
+directory=${1:-"."}
echo "Scanning $directory"
-for x in $(find $directory -type f); do
- if LC_ALL=C grep -q '[^[:print:][:space:]]' "$x"; then
- echo "file "$x" contains non-ascii characters"
- exit 1
- fi
-done
+if LC_ALL=C grep -r '[^[:print:][:space:]]' "$directory"; then
+ echo "Found files containing non-ascii characters."
+ exit 1
+fi
echo "All files are ASCII only"
+
search_string=$1
echo -n "Enter system (sandbox|releng): "
-read system
+read -r system
echo -n "Enter username: "
-read username
+read -r username
echo -n "Enter api_token: "
-read password
+read -r password
-echo $username:$password
+echo "$username:$password"
-wget -O jenkins-jobs.xml https://jenkins.opendaylight.org/$system/api/xml
+wget -O jenkins-jobs.xml "https://jenkins.opendaylight.org/$system/api/xml"
-jobs=`xmlstarlet sel -t -m '//hudson/job' \
+jobs=$(xmlstarlet sel -t -m '//hudson/job' \
-n -v 'name' jenkins-jobs.xml | \
- grep ${search_string}`
+ grep "$search_string")
-for job in `echo $jobs | tr "\n" " "`; do
+for job in $(echo "$jobs" | tr "\n" " "); do
echo "Deleting $job"
curl -X POST "https://$username:$password@jenkins.opendaylight.org/$system/job/${job}/doDelete"
done
replace_string=$2
echo -n "Enter system (sandbox|releng): "
-read system
+read -r system
echo -n "Enter username: "
-read username
+read -r username
echo -n "Enter api_token: "
-read password
+read -r password
-echo $username:$password
+echo "$username:$password"
-wget -O jenkins-jobs.xml https://jenkins.opendaylight.org/$system/api/xml
+wget -O jenkins-jobs.xml "https://jenkins.opendaylight.org/$system/api/xml"
-jobs=`xmlstarlet sel -t -m '//hudson/job' \
+jobs=$(xmlstarlet sel -t -m '//hudson/job' \
-n -v 'name' jenkins-jobs.xml | \
- grep ${search_string}`
+ grep "$search_string")
-for job in `echo $jobs | tr "\n" " "`; do
- new_job=`echo $job | sed -e "s/${search_string}/${replace_string}/"`
+for job in $(echo "$jobs" | tr "\n" " "); do
+ new_job="${job//$search_string/$replace_string}"
echo "Renaming $job to $new_job"
- curl --data "newName=${new_job}" "https://$username:$password@jenkins.opendaylight.org/$system/job/${job}/doRename"
+ #curl --data "newName=${new_job}" "https://$username:$password@jenkins.opendaylight.org/$system/job/${job}/doRename"
done
[tox]
minversion = 1.6
-envlist = docs,pep8,yamllint
+envlist = coala,docs,pep8
skipsdist = true
+[testenv:coala]
+basepython = python3
+deps =
+ coala
+ coala-bears
+commands =
+ python3 -m nltk.downloader punkt maxent_treebank_pos_tagger averaged_perceptron_tagger
+ coala --non-interactive
+
[testenv:docs]
deps = sphinx
commands = sphinx-build -b html -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
[testenv:pep8]
deps = flake8
commands = flake8 scripts/
-
-[testenv:yamllint]
-deps = yamllint
-commands = yamllint -c yamllint.conf jjb/
-
extends: default
rules:
+ empty-lines:
+ max-end: 1
line-length:
max: 120