[submodule "jjb/global-jjb"]
- path = jjb/global-jjb
+ path = global-jjb
url = https://github.com/lfit/releng-global-jjb
[submodule "packer/common-packer"]
path = packer/common-packer
--- /dev/null
+Subproject commit 70c97bbbfbd071c6e75f7a3d47a453ee863c69ec
nexus-snapshot-repo: opendaylight.snapshot
git-url: 'ssh://jenkins-$SILO@git.opendaylight.org:29418'
lftools-version: <1.0.0
- packer-version: 1.2.4
# defaults for parameters installing openstack for csit jobs
devstack-hash: ''
sm-features: ''
# CSIT images configuration
- openstack_system_image_queens: ZZCI - CentOS 7 - devstack-pike - 20171208-1649
+ openstack_system_image_queens: ZZCI - CentOS 7 - devstack-queens - 20180911-204058.666
openstack_system_image_pike: ZZCI - CentOS 7 - devstack-pike - 20171208-1649
stack-template: csit-2-instance-type.yaml
docker_system_count: 1
+++ /dev/null
-Subproject commit 18bb0a40f9ba00fd8417b32fbde9593b6d968c53
--- /dev/null
+../../global-jjb/jenkins-admin
\ No newline at end of file
--- /dev/null
+../../global-jjb/jenkins-init-scripts
\ No newline at end of file
--- /dev/null
+../../global-jjb/jjb
\ No newline at end of file
--- /dev/null
+../../global-jjb/shell
\ No newline at end of file
netconf-csit-3node-cluster-stress-all-fluorine,
netconf-csit-3node-clustering-all-fluorine,
netconf-csit-3node-clustering-scale-all-fluorine,
-netvirt-csit-1node-0cmb-1ctl-1cmp-openstack-queens-sfc-fluorine,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-fluorine,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-fluorine,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-fluorine,
+netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-sfc-fluorine,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upgrade-fluorine,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upgrade-snat-conntrack-fluorine,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-fluorine,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-fluorine,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-fluorine,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-snat-conntrack-fluorine,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-fluorine,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-fluorine,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-fluorine,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-fluorine,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-itm-direct-tunnels-fluorine,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-fluorine,
-netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-fluorine,
netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-fluorine,
-netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-fluorine,
netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-fluorine,
openflowplugin-csit-1node-bundle-based-reconciliation-all-fluorine,
openflowplugin-csit-1node-cbench-only-fluorine,
netconf-csit-3node-cluster-stress-all-neon,
netconf-csit-3node-clustering-all-neon,
netconf-csit-3node-clustering-scale-all-neon,
-netvirt-csit-1node-0cmb-1ctl-1cmp-openstack-queens-sfc-neon,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-neon,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-neon,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-neon,
+netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-sfc-neon,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upgrade-neon,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upgrade-snat-conntrack-neon,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-itm-direct-tunnels-neon,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-neon,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-neon,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-snat-conntrack-neon,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-neon,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-neon,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-neon,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-itm-direct-tunnels-neon,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-neon,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-neon,
-netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-neon,
netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-neon,
-netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-neon,
netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-neon,
openflowplugin-csit-1node-bundle-based-reconciliation-all-neon,
openflowplugin-csit-1node-cbench-only-neon,
netconf-csit-3node-clustering-all-oxygen,
netconf-csit-3node-clustering-scale-all-oxygen,
netvirt-csit-1node-0cmb-1ctl-1cmp-openstack-queens-sfc-oxygen,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-oxygen,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-oxygen,
-netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-oxygen,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-itm-direct-tunnels-oxygen,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-oxygen,
netvirt-csit-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-oxygen,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-oxygen,
netvirt-csit-1node-1cmb-0ctl-0cmp-openstack-queens-upstream-stateful-snat-conntrack-oxygen,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-itm-direct-tunnels-oxygen,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-oxygen,
-netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-snat-conntrack-oxygen,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-itm-direct-tunnels-oxygen,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-oxygen,
netvirt-csit-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-snat-conntrack-oxygen,
-netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-oxygen,
netvirt-csit-hwvtep-1node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-oxygen,
-netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-pike-upstream-stateful-oxygen,
netvirt-csit-hwvtep-3node-0cmb-1ctl-2cmp-openstack-queens-upstream-stateful-oxygen,
openflowplugin-csit-1node-bundle-based-reconciliation-all-oxygen,
openflowplugin-csit-1node-cbench-only-oxygen,
setup_live_migration_compute ${!CONTROLIP} ${!CONTROLIP}
fi
[ -n "${OVS_INSTALL}" ] && install_ovs ${!CONTROLIP} /tmp/ovs_rpms
+ if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
+ # This should really be done by the networking-odl devstack plugin,
+ # but in the meantime do it ourselves
+ ssh ${!CONTROLIP} "sudo ovs-vsctl set Open_vSwitch . external_ids:of-tunnel=true"
+ fi
echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${CONTROLIP}"
ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
install_rdo_release ${!COMPUTEIP}
setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP}
[ -n "${OVS_INSTALL}" ] && install_ovs ${!COMPUTEIP} /tmp/ovs_rpms
+ if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then
+ # This should really be done by the networking-odl devstack plugin,
+ # but in the meantime do it ourselves
+ ssh ${!COMPUTEIP} "sudo ovs-vsctl set Open_vSwitch . external_ids:of-tunnel=true"
+ fi
echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}"
ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
jobs:
- '{project-name}-distribution-check-{stream}'
- '{project-name}-maven-javadoc-jobs'
+ - '{project-name}-rtd-jobs':
+ build-node: centos7-builder-2c-2g
+ project-pattern: lispflowmapping
+ rtd-build-url: https://readthedocs.org/api/v2/webhook/odl-lispflowmapping/47783/
+ rtd-token: 181be9dd804e4969b9f318a6f1988e3cbee9d9a8
- odl-maven-jobs
stream: neon
jobs:
- '{project-name}-distribution-check-{stream}'
- '{project-name}-maven-javadoc-jobs'
+ - '{project-name}-rtd-jobs':
+ build-node: centos7-builder-2c-2g
+ project-pattern: lispflowmapping
+ rtd-build-url: https://readthedocs.org/api/v2/webhook/odl-lispflowmapping/47783/
+ rtd-token: 181be9dd804e4969b9f318a6f1988e3cbee9d9a8
- odl-maven-jobs
stream: fluorine
mvn-settings: 'mdsal-settings'
mvn-opts: '-Xmx2048m'
+ sign-artifacts: true
dependencies: 'odlparent-merge-{stream},yangtools-merge-{stream}'
email-upstream: '[mdsal] [odlparent] [yangtools]'
mvn-settings: 'mdsal-settings'
mvn-goals: '-Dmaven.compile.fork=true clean deploy -Dintegrationtests'
mvn-opts: '-Xmx2048m'
+ sign-artifacts: true
build-node: centos7-builder-8c-8g
build-timeout: 90
dependencies: 'odlparent-merge-{stream},yangtools-merge-{stream}'
- project:
name: mdsal-sonar
jobs:
- - gerrit-maven-sonar
+ - gerrit-maven-sonar:
+ mvn-params: >
+ -Dodl.jacoco.aggregateFile=$WORKSPACE/target/jacoco.exec
+ -Dsonar.jacoco.reportPath=$WORKSPACE/target/jacoco.exec
+
project: 'mdsal'
project-name: 'mdsal'
branch: 'master'
openstack_system2_flavor: odl-highcpu-8
os-cmb-cnt: 0
os-ctl-cnt: 1
- os-cmp-cnt: 1
+ os-cmp-cnt: 2
topology:
- 1node:
- openstack_system2_count: 1
+ openstack_system2_count: 2
odl_system_count: 1
enable-haproxy: 'no'
install-features: 'odl-netvirt-sfc'
enable-openstack-plugins: 'networking-odl,networking-sfc'
testplan: '{project}-extensions-sfc.txt'
+ openstack:
+ - queens:
+ openstack-branch: 'stable/queens'
+ odl-ml2-branch: 'stable/queens'
+ odl-ml2-port-binding: 'pseudo-agentdb-binding'
+ openstack_system_image: '{openstack_system_image_queens}'
+ openstack_system2_image: '{openstack_system_image_queens}'
+
stream:
- neon:
branch: 'master'
- oxygen:
branch: 'stable/oxygen'
ovs-install: 'v2.6.1-nsh'
-
- openstack:
- - queens:
- openstack-branch: 'stable/queens'
- odl-ml2-branch: 'stable/queens'
- odl-ml2-port-binding: 'pseudo-agentdb-binding'
- openstack_system_image: '{openstack_system_image_queens}'
- openstack_system2_image: '{openstack_system_image_queens}'
+ os-cmp-cnt: 1
+ openstack_system2_count: 1
+ openstack_system_image: '{openstack_system_image_pike}'
+ openstack_system2_image: '{openstack_system_image_pike}'
- project:
name: netvirt-csit-1cmb-0ctl-0cmp-openstack-integration
mvn-settings: odlparent-settings
mvn-opts: '-Xmx1024m -XX:MaxPermSize=256m'
mvn-version: mvn35
+ sign-artifacts: true
dependencies: ''
email-upstream: '[odlparent]'
mvn-settings: odlparent-settings
mvn-opts: '-Xmx1024m -XX:MaxPermSize=256m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[odlparent]'
mvn-settings: odlparent-settings
mvn-opts: '-Xmx1024m -XX:MaxPermSize=256m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[odlparent]'
mvn-settings: odlparent-settings
mvn-opts: '-Xmx1024m -XX:MaxPermSize=256m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[odlparent]'
}
EOF
-python openci_publish -H 129.192.69.55 -U ${ACTIVEMQ_USER} -p ${ACTIVEMQ_PASSWORD} -n openci.prototype -B ./json_body.txt
-
echo "Constructed $PUBLISH_EVENT_TYPE"
echo "--------------------------------------------"
cat ./json_body.txt
echo "--------------------------------------------"
+python openci_publish -H 129.192.69.55 -U ${ACTIVEMQ_USER} -p ${ACTIVEMQ_PASSWORD} -n openci.prototype -B ./json_body.txt
+
deactivate
default: "'autorelease': 'SUCCESS'"
description: 'The confidence level the published artifact gained'
+ wrappers:
+ - credentials-binding:
+ - username-password-separated:
+ credential-id: openci-connect-activemq
+ username: ACTIVEMQ_USER
+ password: ACTIVEMQ_PASSWORD
+ - workspace-cleanup
+
builders:
# this build step in the job executes create-ape.sh to construct the
# ArtifactPublishedEvent (ape) and publishes it using the python utility
build-timeout: '{build-timeout}'
builders:
+ - shell: !include-raw: setup-ansible.sh
- shell: !include-raw: test-ansible-rpm.sh
- shell: !include-raw: test-ansible-odl-user.sh
- shell: !include-raw: test-odl-logs.sh
--- /dev/null
+#!/bin/bash
+
+# Options:
+# -x: Echo commands
+# -e: Fail on errors
+# -o pipefail: Fail on errors in scripts this calls, give stacktrace
+set -ex -o pipefail
+
+# Install required packages
+virtualenv rpm_build
+source rpm_build/bin/activate
+PYTHON="rpm_build/bin/python"
+$PYTHON -m pip install --upgrade pip
+
+# Install Ansible
+sudo yum install -y ansible
+
+# Install local version of ansible-opendaylight to path expected by Ansible.
+# Could almost do this by setting ANSIBLE_ROLES_PATH=$WORKSPACE, but Ansible
+# expects the dir containing the role to have the name of role. The JJB project
+# is called "ansible", which causes the cloned repo name to not match the role
+# name "opendaylight". So we need a cp/mv either way and this is simplest.
+sudo cp -R $WORKSPACE/ansible /etc/ansible/roles/opendaylight
#!/bin/bash
-# Options:
-# -x: Echo commands
-# -e: Fail on errors
-# -o pipefail: Fail on errors in scripts this calls, give stacktrace
-set -ex -o pipefail
-
-# Install required packages
-virtualenv rpm_build
-source rpm_build/bin/activate
-rpm_build/bin/python -m pip install --upgrade pip
-
-# Install Ansible
-sudo yum install -y ansible
-
-# Install local version of ansible-opendaylight to path expected by Ansible.
-# Could almost do this by setting ANSIBLE_ROLES_PATH=$WORKSPACE, but Ansible
-# expects the dir containing the role to have the name of role. The JJB project
-# is called "ansible", which causes the cloned repo name to not match the role
-# name "opendaylight". So we need a cp/mv either way and this is simplest.
-sudo cp -R $WORKSPACE/ansible /etc/ansible/roles/opendaylight
-
-# Install OpenDaylight via repo using example Ansible playbook
-sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/examples/rpm_8_devel.yml
-
# Create Ansible custom module directories
sudo mkdir -p /usr/share/ansible/plugins/modules
# Copy the custom module to the directory above
sudo cp $WORKSPACE/ansible/library/odl_usermod.py /usr/share/ansible/plugins/modules/
-# Execute the tests playnook
-sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/tests/test-odl-users.yaml -vvv
+# Execute the odl-user-test playbook
+sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/tests/test-odl-users.yaml -v
#!/bin/bash
-# Options:
-# -x: Echo commands
-# -e: Fail on errors
-# -o pipefail: Fail on errors in scripts this calls, give stacktrace
-set -ex -o pipefail
-
-# Install required packages
-virtualenv rpm_build
-source rpm_build/bin/activate
-PYTHON="rpm_build/bin/python"
-$PYTHON -m pip install --upgrade pip
-
-# Install Ansible
-sudo yum install -y ansible
-
-# Install local version of ansible-opendaylight to path expected by Ansible.
-# Could almost do this by setting ANSIBLE_ROLES_PATH=$WORKSPACE, but Ansible
-# expects the dir containing the role to have the name of role. The JJB project
-# is called "ansible", which causes the cloned repo name to not match the role
-# name "opendaylight". So we need a cp/mv either way and this is simplest.
-sudo cp -R $WORKSPACE/ansible /etc/ansible/roles/opendaylight
-
# Install OpenDaylight via repo using example Ansible playbook
-sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/examples/rpm_8_devel_odl_api.yml
-
-# Add more tests
+sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/examples/rpm_8_devel_odl_api.yml --extra-vars "@$WORKSPACE/ansible/examples/log_vars.json"
#!/bin/bash
-# Options:
-# -x: Echo commands
-# -e: Fail on errors
-# -o pipefail: Fail on errors in scripts this calls, give stacktrace
-set -ex -o pipefail
-
-# Install required packages
-virtualenv rpm_build
-source rpm_build/bin/activate
-rpm_build/bin/python -m pip install --upgrade pip
-
-# Install Ansible
-sudo yum install -y ansible
-
-# Install local version of ansible-opendaylight to path expected by Ansible.
-# Could almost do this by setting ANSIBLE_ROLES_PATH=$WORKSPACE, but Ansible
-# expects the dir containing the role to have the name of role. The JJB project
-# is called "ansible", which causes the cloned repo name to not match the role
-# name "opendaylight". So we need a cp/mv either way and this is simplest.
-sudo cp -R $WORKSPACE/ansible /etc/ansible/roles/opendaylight
-
-# Install OpenDaylight via repo using example Ansible playbook
-sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/examples/rpm_8_devel.yml
-
# Execute the test ODL logs playbook
-sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/tests/test-odl-logs.yaml -vvv
+sudo ansible-playbook -i "localhost," -c local $WORKSPACE/ansible/tests/test-odl-logs.yaml -v
builders:
- lf-infra-pre-build
+ - shell: |
+ #!/bin/bash -l
+ pip install --user --upgrade lftools[openstack]~=0.17.1
# Servers
- odl-openstack-cleanup-stale-stacks
- odl-openstack-cleanup-stale-nodes
- robot-list
recurse: false
+- view:
+ name: '00-Empty View'
+ description: >
+ Empty job view. This is used as the default landing view to keep the
+ Jenkins UI responding better while a) under high load and b) when there
+ are a lot of jobs defined in the system
+ view-type: list
+ filter-executors: false
+ filter-queue: false
+ recurse: false
+ regex: ''
+
- view:
name: 01-Recent
regex: '.*'
mvn-settings: 'yangtools-settings'
mvn-opts: '-Xmx1024m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[yangtools]'
mvn-settings: 'yangtools-settings'
mvn-opts: '-Xmx1024m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[yangtools]'
mvn-settings: 'yangtools-settings'
mvn-opts: '-Xmx1024m'
+ sign-artifacts: true
dependencies: ''
email-upstream: '[yangtools]'