--- /dev/null
+# OpenDaylight CI Packer
+
+[Packer][1] is a tool for automatically creating VM and container images,
+configuring them and post-processing them into standard output formats.
+
+We build OpenDaylight's CI images via Packer.
+
+## Building
+
+You'll need to [install Packer][2], of course.
+
+OpenDaylight's Packer configuration is divided into build-specific variables,
+output-specific templates and a set of shared provisioning scripts. To do a
+specific build, combine the template for the desired output artifact type with
+a variable file. To build a new java-builder instance, the following would be done:
+
+```
+packer build -var-file=vars/cloud-env.json -var-file=vars/centos.json templates/java-builder.json
+```
+
+**NOTE:** vars/cloud-env.json is a gitignored file as it contains private
+information. There is a vars/cloud-env.json.example file that may be used as a
+base for creating the one needed.
+
+This would build a bootable image in two different OpenStack clouds. Specifically,
+Rackspace's public cloud and a private OpenStack cloud.
+
+From a high level, the builds:
+
+* Boot a specified base image in both clouds.
+* Run a set of shell scripts, listed in the template's shell provisioner
+ section, to do any configuration required by the builder.
+* Execute a shutdown of the running instance in the clouds.
+* Execute a 'nova image-create' operation against the shutdown instance.
+* Perform a 'nova delete' operation against the shutdown instance.
+
+[1]: https://www.packer.io/
+[2]: https://www.packer.io/intro/getting-started/setup.html
echo "---> Updating operating system"
yum clean all -q
+ yum install -y -q deltarpm
yum update -y -q
# add in components we need or want on systems
echo "---> Attempting to detect OS"
# upstream cloud images use the distro name as the initial user
-ORIGIN=$(logname)
+ORIGIN=$(if [ -e /etc/redhat-release ]
+ then
+ echo redhat
+ else
+ echo ubuntu
+ fi)
+#ORIGIN=$(logname)
case "${ORIGIN}" in
- fedora|centos)
+ fedora|centos|redhat)
echo "---> RH type system detected"
rh_systems
;;
# do the package install via puppet so that we know it actually installs
# properly and it also makes it quieter but with better error reporting
echo "---> Installing Group Based Policy requirements"
-puppet apply /vagrant/gbp_packages.pp
+puppet apply /tmp/packer/gbp_packages.pp
# configure docker networking so that it does not conflict with LF internal networks
# configure docker daemon to listen on port 5555 enabling remote management
# docker
echo "---> Installing docker"
-puppet apply /vagrant/docker_setup.pp
+puppet apply /tmp/packer/docker_setup.pp
echo "---> stopping docker"
puppet apply -e "service { 'docker': ensure => stopped }"
# OVS
echo "---> Installing ovs"
puppet module install puppetlabs-vcsrepo
-puppet apply /vagrant/ovs_setup.pp
+puppet apply /tmp/packer/ovs_setup.pp
pushd /root/ovs
DEB_BUILD_OPTIONS='parallel=8 nocheck' fakeroot debian/rules binary | \
--- /dev/null
+#!/bin/bash
+# vi: ts=4 sw=4 sts=4 et :
+
+# Nothing to do for Ubuntu specific provisioning
--- /dev/null
+#!/bin/bash
+# vi: ts=4 sw=4 sts=4 et :
+
+/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;
--- /dev/null
+#!/bin/bash
+
+# vim: sw=2 ts=2 sts=2 et :
+
+rm -rf /etc/Pegasus/*.cnf /etc/Pegasus/*.crt /etc/Pegasus/*.csr \
+ /etc/Pegasus/*.pem /etc/Pegasus/*.srl /root/anaconda-ks.cfg \
+ /root/anaconda-post.log /root/initial-setup-ks.cfg /root/install.log \
+ /root/install.log.syslog /var/cache/fontconfig/* /var/cache/gdm/* \
+ /var/cache/man/* /var/lib/AccountService/users/* /var/lib/fprint/* \
+ /var/lib/logrotate.status /var/log/*.log* /var/log/BackupPC/LOG \
+ /var/log/ConsoleKit/* /var/log/anaconda.syslog /var/log/anaconda/* \
+ /var/log/apache2/*_log /var/log/apache2/*_log-* /var/log/apt/* \
+ /var/log/aptitude* /var/log/audit/* /var/log/btmp* /var/log/ceph/*.log \
+ /var/log/chrony/*.log /var/log/cron* /var/log/cups/*_log /var/log/debug* \
+ /var/log/dmesg* /var/log/exim4/* /var/log/faillog* /var/log/gdm/* \
+ /var/log/glusterfs/*glusterd.vol.log /var/log/glusterfs/glusterfs.log \
+ /var/log/httpd/*log /var/log/installer/* /var/log/jetty/jetty-console.log \
+ /var/log/journal/* /var/log/lastlog* /var/log/libvirt/libvirtd.log \
+ /var/log/libvirt/lxc/*.log /var/log/libvirt/qemu/*.log \
+ /var/log/libvirt/uml/*.log /var/log/lightdm/* /var/log/mail/* \
+ /var/log/maillog* /var/log/messages* /var/log/ntp /var/log/ntpstats/* \
+ /var/log/ppp/connect-errors /var/log/rhsm/* /var/log/sa/* /var/log/secure* \
+ /var/log/setroubleshoot/*.log /var/log/spooler* /var/log/squid/*.log \
+ /var/log/syslog* /var/log/tallylog* /var/log/tuned/tuned.log /var/log/wtmp* \
+ /var/named/data/named.run
+
+rm -rf ~/.viminfo /etc/ssh/ssh*key* /root/.ssh/*
+
+# kill any cloud-init related bits
+rm -rf /var/lib/cloud/*
+
+# clean-up any manual packer uploads
+rm -rf /tmp/packer
+
+# Force a system sync and sleep to get around any SSD issues
+echo "Forcing sync and sleep for 10sec"
+sync
+sleep 10
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - baseline - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - baseline - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - devstack - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - devstack - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/devstack.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - docker - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - docker - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/docker.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - gbp - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - gbp - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": ["mkdir -p /tmp/packer"]
+ },
+ {
+ "type": "file",
+ "source": "provision/gbp_puppet/",
+ "destination": "/tmp/packer"
+ },
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/gbp.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - java-builder - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - java-builder - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/java-builder.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - mininet-ovs-2.3 - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - mininet-ovs-2.3 - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/mininet-ovs-2.3.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - mininet - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - mininet - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/mininet.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+{
+ "variables": {
+ "public_base_image": null,
+ "public_network": null,
+ "public_cloud_user": null,
+ "public_auth_url": null,
+ "public_tenant": null,
+ "public_user": null,
+ "public_pass": null,
+
+ "private_base_image": null,
+ "private_network": null,
+ "private_cloud_user": null,
+ "private_auth_url": null,
+ "private_tenant": null,
+ "private_user": null,
+ "private_pass": null,
+
+ "distro": null,
+ "cloud_user_data": null
+ },
+ "builders": [
+ {
+ "type": "openstack",
+ "name": "public_cloud",
+ "identity_endpoint": "{{user `public_auth_url`}}",
+ "tenant_name": "{{user `public_tenant`}}",
+ "username": "{{user `public_user`}}",
+ "password": "{{user `public_pass`}}",
+ "region": "DFW",
+ "ssh_username": "{{user `public_cloud_user`}}",
+ "image_name": "{{user `distro`}} - robot - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `public_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `public_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ },
+ {
+ "type": "openstack",
+ "name": "private_cloud",
+ "identity_endpoint": "{{user `private_auth_url`}}",
+ "tenant_name": "{{user `private_tenant`}}",
+ "username": "{{user `private_user`}}",
+ "password": "{{user `private_pass`}}",
+ "ssh_username": "{{user `private_cloud_user`}}",
+ "image_name": "{{user `distro`}} - robot - {{isotime \"20060102-1504\"}}",
+ "source_image_name": "{{user `private_base_image`}}",
+ "flavor": "general1-1",
+ "networks": [
+ "{{user `private_network`}}"
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell",
+ "scripts": [
+ "provision/baseline.sh",
+ "provision/robot.sh",
+ "provision/system_reseal.sh"
+ ],
+ "execute_command": "chmod +x {{ .Path }}; if [ \"$UID\" == \"0\" ]; then {{ .Vars }} '{{ .Path }}'; else {{ .Vars }} sudo -E '{{ .Path }}'; fi"
+ }
+ ]
+}
--- /dev/null
+# exclude private cloud-env settings
+cloud-env.json
--- /dev/null
+{
+ "public_base_image": "CentOS 7 (PVHVM)",
+ "public_network": "cac67a72-aefc-48f8-ae55-9affa3540dd0",
+ "public_cloud_user": "root",
+
+ "private_base_image": "CentOS 7 (cloudimg 1510)",
+ "private_network": "6bd0cc77-9896-4fba-a663-88c1a4bbd9a2",
+ "private_cloud_user": "centos",
+
+ "distro": "CentOS 7",
+ "cloud_user_data": "provision/rh-user_data.sh"
+}
--- /dev/null
+{
+ "public_auth_url": "https://identity.api.rackspacecloud.com/v2.0/",
+
+  "public_tenant": "TENANTID",
+ "public_user": "USERID",
+ "public_pass": "USERPASS",
+
+
+ "private_auth_url": "https://privapi.opendaylight.org:5000/v2.0",
+
+  "private_tenant": "TENANTID",
+ "private_user": "USERID",
+ "private_pass": "USERPASS"
+}
--- /dev/null
+{
+ "public_base_image": "Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)",
+ "public_network": "cac67a72-aefc-48f8-ae55-9affa3540dd0",
+ "public_cloud_user": "root",
+
+ "private_base_image": "Ubuntu 14.04 LTS Trusty Tahr (cloudimg)",
+ "private_network": "6bd0cc77-9896-4fba-a663-88c1a4bbd9a2",
+ "private_cloud_user": "ubuntu",
+
+ "distro": "Ubuntu 14.04",
+ "cloud_user_data": "provision/null_data.sh"
+}
--- /dev/null
+{
+ "public_base_image": "Ubuntu 16.04 LTS (Xenial Xerus) (PVHVM)",
+ "public_network": "cac67a72-aefc-48f8-ae55-9affa3540dd0",
+ "public_cloud_user": "root",
+
+ "private_base_image": "Ubuntu 16.04 LTS (2016-05-03 cloudimg)",
+ "private_network": "6bd0cc77-9896-4fba-a663-88c1a4bbd9a2",
+ "private_cloud_user": "ubuntu",
+
+ "distro": "Ubuntu 16.04",
+ "cloud_user_data": "provision/null_data.sh"
+}
+++ /dev/null
-The purpose of these various directories is to have Vagrant definitions
-that are then snapshotted for use as slaves in the OpenDaylight and
-ODLForge environments.
-
-If building up in a Rackspace environment using this for the first time
-there is a particular order that should be taken to produce a finalized
-image.
-
-1. Bring a vagrant image up using the rackspace-convert-base definition.
- This will prepare a basic Rackspace image to operate properly when
- being managed by vagrant. It is purposely very limited in what it
- does.
-
-2. After the rackspace-convert-base image is up and you receive the
- notice to snapshot the image perform a ```nova create-image```
- against the running instance. Once the snapshot is complete you may
- destroy the currently running vagrant image (it's easiest if the
- create-image is done with --poll so you know when it's complete)
-
-3. Bring up one of the various other vagrant images passing
- ```RSIMAGE=${a_vagrant_image_id}``` where ```$a_vagrant_image_id```
- is the imageID that was generated after the snapshotting operation in
- step 2. You probably also want to execute using ```RSRESEAL=true` to
- have the brought up image resealed for cloning purposes.
-
-4. If you executed with ```RSRESEAL=true``` now is the time to take the
- snapshot of the current running vagrant. See step 2
-
-5. The final step in preparing an image for use in the Linux Foundation
- managed environments to then take the image produced in step 4 and
- run the ```lf-networking``` vagrant definition using it. See the
- README.md in that vagrant folder for the required extra environment
- variables.
-
-6. Snapshot the new vagrant, see step 2 for details.
-
-At this point a new Rackspace image will be ready for a given network
-configuration. If you, the reader, are looking to utilize any of this
-for your own Rackspace managed environment, or standard Vagrant then
-step 5 & 6 will likely not be needed as they are specific to how the
-Linux Foundation manages the Jenkins environment for OpenDaylight.
+++ /dev/null
-baseline can be used for preparing basic test images. It's suitable for
-use only as a verification that our baseline library script is working
-as expected or for a very vanilla image.
-
-This is controlled by the IMAGE environment variable
-
-ex:
-
-$ export RESEAL=true
-$ IMAGE='CentOS 7' vagrant up --provider=openstack
-
-If $RESEAL is not set then the system will not be cleaned up in
-preparation for snapshotting. This is mostly useful for troubleshooting
-a vagrant definition before you do your final creation and snapshot.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the rackspace provider dummy box
- config.vm.box = "dummy"
-
- # rackspace systems even with cloud-init
- # don't seem to have the cloud init user ${osname}
- # getting the ssh key for some reason, root does
- # so use that
- config.ssh.username = 'root'
-
- # DEPRECATED
- # ==========
- #
- # NOTE: The Rackspace provider section is deprecated as we are moving into a
- # private OpenStack cloud. It may be revived after we've migrated and have a
- # chance to do work to reconfigure the Rackspace public cloud to work for
- # burst access
- #
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/rackspace/Vagrantfile
- # rs.username
- # rs.api_key
- # rs.rackspace_region
- #
- # If you are not using a SSH token / smartcard also set this
- # rs.key_name
- # config.ssh.private_key_path -- set this outside the rackspace block
- # in your base box
- config.vm.provider :rackspace do |rs|
- # create these base builds always on the smallest system possible
- rs.flavor = 'general1-1'
-
- # allow for switching to ORD cloud but default to DFW
- if (ENV['RSREGION'] == 'ord')
- rs.rackspace_region = :ord
- else
- rs.rackspace_region = :dfw
- end
-
- # Default to the Fedora 20 image unless overridden by a RSIMAGE
- # environment variable
- if ENV['IMAGE']
- rs.image = ENV['IMAGE']
- else
- rs.image = 'Fedora 20 (Heisenbug) (PVHVM)'
- end
- end
- # /DEPRECATED
-
- # Configuration used by ODL Private cloud
- # Should be mostly usable by any OpenStack cloud that can
- # utilize upstream cloud images
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
-
- end
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # Execute a system clean-up in prep for imaging so that this base
- # image can be used for other Rackspace Vagrant configurations
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
-end
+++ /dev/null
-basic-builder can be used to take an already converted Rackspace
-native base image into a usuable basic java system for use in the
-OpenDaylight build and testing environment.
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'CentOS 7 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-basic-java-node can be used to take an already converted Rackspace
-native base image into a usuable basic java system for use in the
-OpenDaylight build and testing environment.
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'CentOS 6.5 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the rackspace provider dummy box
- config.vm.box = "dummy"
-
- # rackspace systems, even with cloud-init
- # don't seem to have the cloud int user ${osname} (or similar)
- # getting the ssh key for some reason, root does for sure
- # so use that
- config.ssh.username = 'root'
-
- # Fedora and EL systems default to requiring tty for sudo
- # This should have been disabled with the Vagrant ready
- # base box conversion (see rackspace-convert-base vagrant)
- # but just to be safe
- config.ssh.pty = true
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/rackspace/Vagrantfile
- # rs.username
- # rs.api_key
- # rs.rackspace_region
- #
- # If you are not using an SSH token / smartcard also set this
- # rs.key_name
- # config.ssh.private_key_path -- set this outside the rackspace block
- # in your base box
- config.vm.provider :rackspace do |rs|
- # create these base builds always on the smallest system possible
- rs.flavor = 'general1-1'
-
- # allow for switching to ORD cloud but default to DFW
- if (ENV['RSREGION'] == 'ord')
- rs.rackspace_region = :ord
- else
- rs.rackspace_region = :dfw
- end
-
- # Default the CentOS 6.5 - Vagrant ready image unless overriden by a RSIMAGE
- # environment variable
- if ENV['RSIMAGE']
- rs.image = ENV['RSIMAGE']
- else
- rs.image = 'CentOS 6.5 - Vagrant ready'
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # run our bootstrapping for the ovsdb-devstack system
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
-
- #################
- # LF NETWORKING #
- #################
-
- if ENV['LFNETWORK']
- # reconfigure the network setup to support our special private setup
- config.vm.provision 'shell', path: '../lib/lf-networking/configure_lf_infra.sh',
- args: ENV['RSSUBDOMAIN']
- end
-
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-#!/bin/bash
-
-# vim: sw=4 ts=4 sts=4 et tw=72 :
-
-yum clean all
-
-# Make sure the system is fully up to date
-yum update -q -y
-
-
+++ /dev/null
-basic-mininet-fedora-node can be used to take an already converted
-Rackspace native base image to a basic system with mininet and other
-test tools for use in the OpenDaylight build and testing environment
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'Fedora 21 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the rackspace provider dummy box
- config.vm.box = "dummy"
-
- # rackspace systems, even with cloud-init
- # don't seem to have the cloud int user ${osname} (or similar)
- # getting the ssh key for some reason, root does for sure
- # so use that
- config.ssh.username = 'root'
-
- # Fedora and EL systems default to requiring tty for sudo
- # This should have been disabled with the Vagrant ready
- # base box conversion (see rackspace-convert-base vagrant)
- # but just to be safe
- config.ssh.pty = true
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/rackspace/Vagrantfile
- # rs.username
- # rs.api_key
- # rs.rackspace_region
- #
- # If you are not using an SSH token / smartcard also set this
- # rs.key_name
- # config.ssh.private_key_path -- set this outside the rackspace block
- # in your base box
- config.vm.provider :rackspace do |rs|
- # create these base builds always on the smallest system possible
- rs.flavor = 'general1-1'
-
- # allow for switching to ORD cloud but default to DFW
- if (ENV['RSREGION'] == 'ord')
- rs.rackspace_region = :ord
- else
- rs.rackspace_region = :dfw
- end
-
- # Default the Fedora 21 - Vagrant ready image unless overriden by a RSIMAGE
- # environment variable
- if ENV['RSIMAGE']
- rs.image = ENV['RSIMAGE']
- else
- rs.image = 'Fedora 21 - Vagrant ready'
- end
- end
-
- # run our bootstrapping for the ovsdb-devstack system
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: 'system_reseal.sh'
- end
-end
+++ /dev/null
-#!/bin/bash
-
-# vim: sw=4 ts=4 sts=4 et tw=72 :
-
-# update os
-yum clean all
-yum update -q -y
-
-# install openvswitch
-yum install -q -y openvswitch
-# make sure it's enabled on system start, no need to start it
-# during setup
-systemctl enable openvswitch
-
-# install mininet
-git clone git://github.com/mininet/mininet
-cd mininet
-git checkout -b 2.2.1 2.2.1
-cd ..
-mininet/util/install.sh -nf
-
+++ /dev/null
-#!/bin/bash
-
-# vim: sw=2 ts=2 sts=2 et :
-
-if [ -f /.autorelabel ]; then
- echo "**********************************************"
- echo "* SYSTEM REQUIRES RELABELING SKIPPING RESEAL *"
- echo "* PLEASE RESTART SYSTEM AND RERUN *"
- echo "* PROVISIONING SCRIPTS *"
- echo "**********************************************"
- exit 1;
-fi
-
-# clean-up from any prior cloud-init networking
-rm -rf /etc/sysconfig/network-scripts/{ifcfg,route}-eth*
-
-rm -rf /etc/Pegasus/*.cnf /etc/Pegasus/*.crt /etc/Pegasus/*.csr /etc/Pegasus/*.pem /etc/Pegasus/*.srl /root/anaconda-ks.cfg /root/anaconda-post.log /root/initial-setup-ks.cfg /root/install.log /root/install.log.syslog /var/cache/fontconfig/* /var/cache/gdm/* /var/cache/man/* /var/lib/AccountService/users/* /var/lib/fprint/* /var/lib/logrotate.status /var/log/*.log* /var/log/BackupPC/LOG /var/log/ConsoleKit/* /var/log/anaconda.syslog /var/log/anaconda/* /var/log/apache2/*_log /var/log/apache2/*_log-* /var/log/apt/* /var/log/aptitude* /var/log/audit/* /var/log/btmp* /var/log/ceph/*.log /var/log/chrony/*.log /var/log/cron* /var/log/cups/*_log /var/log/debug* /var/log/dmesg* /var/log/exim4/* /var/log/faillog* /var/log/gdm/* /var/log/glusterfs/*glusterd.vol.log /var/log/glusterfs/glusterfs.log /var/log/httpd/*log /var/log/installer/* /var/log/jetty/jetty-console.log /var/log/journal/* /var/log/lastlog* /var/log/libvirt/libvirtd.log /var/log/libvirt/lxc/*.log /var/log/libvirt/qemu/*.log /var/log/libvirt/uml/*.log /var/log/lightdm/* /var/log/mail/* /var/log/maillog* /var/log/messages* /var/log/ntp /var/log/ntpstats/* /var/log/ppp/connect-errors /var/log/rhsm/* /var/log/sa/* /var/log/secure* /var/log/setroubleshoot/*.log /var/log/spooler* /var/log/squid/*.log /var/log/syslog* /var/log/tallylog* /var/log/tuned/tuned.log /var/log/wtmp* /var/named/data/named.run
-
-rm -rf ~/.viminfo /etc/ssh/ssh*key*
-
-# kill any cloud-init related bits
-rm -rf /var/lib/cloud/*
-
-if [ -e /usr/bin/facter ]
-then
- if [ `/usr/bin/facter operatingsystem` = 'Ubuntu' ]
- then
- rm -rf /etc/hostname* /etc/hosts /etc/network/interfaces /etc/network/interfaces.*.bak~
- cat <<EOINT >> /etc/network/interfaces
-# Used by ifup(8) and ifdown(8). See the interfaces(5) manpage or
-# /usr/share/doc/ifupdown/examples for more information.
-# The loopback network interface
-auto lo
-iface lo inet loopback
-EOINT
- fi
-fi
-
-echo "********************************************"
-echo "* PLEASE SNAPSHOT IMAGE AT THIS TIME *"
-echo "********************************************"
+++ /dev/null
-basic-mininet-node can be used to take an already converted Rackspace
-native base image to a usuable basic system with mininet, netopeer /
-netconf and installation requirements for VTN already setup for use in
-the OpenDaylight build and testing environment
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'CentOS 6.5 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the rackspace provider dummy box
- config.vm.box = "dummy"
-
- # rackspace systems, even with cloud-init
- # don't seem to have the cloud int user ${osname} (or similar)
- # getting the ssh key for some reason, root does for sure
- # so use that
- config.ssh.username = 'root'
-
- # Fedora and EL systems default to requiring tty for sudo
- # This should have been disabled with the Vagrant ready
- # base box conversion (see rackspace-convert-base vagrant)
- # but just to be safe
- config.ssh.pty = true
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/rackspace/Vagrantfile
- # rs.username
- # rs.api_key
- # rs.rackspace_region
- #
- # If you are not using an SSH token / smartcard also set this
- # rs.key_name
- # config.ssh.private_key_path -- set this outside the rackspace block
- # in your base box
- config.vm.provider :rackspace do |rs|
- # create these base builds always on the smallest system possible
- rs.flavor = 'general1-1'
-
- # allow for switching to ORD cloud but default to DFW
- if (ENV['RSREGION'] == 'ord')
- rs.rackspace_region = :ord
- else
- rs.rackspace_region = :dfw
- end
-
- # Default the CentOS 6.5 - Vagrant ready image unless overriden by a RSIMAGE
- # environment variable
- if ENV['RSIMAGE']
- rs.image = ENV['RSIMAGE']
- else
- rs.image = 'CentOS 6.5 - Vagrant ready'
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # run our bootstrapping for the ovsdb-devstack system
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
-
- #################
- # LF NETWORKING #
- #################
-
- if ENV['LFNETWORK']
- # reconfigure the network setup to support our special private setup
- config.vm.provision 'shell', path: '../lib/lf-networking/configure_lf_infra.sh',
- args: ENV['RSSUBDOMAIN']
- end
-
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-#!/bin/bash
-
-# vim: sw=4 ts=4 sts=4 et tw=72 :
-
-yum clean all
-# Add the ODL yum repo (not needed for java nodes, but useful for
-# potential later layers)
-yum install -q -y https://nexus.opendaylight.org/content/repositories/opendaylight-yum-epel-6-x86_64/rpm/opendaylight-release/0.1.0-1.el6.noarch/opendaylight-release-0.1.0-1.el6.noarch.rpm
-
-# Make sure the system is fully up to date
-yum update -q -y
-
-# Add in git (needed for most everything) and XML-XPath as it is useful
-# for doing CLI based CML parsing of POM files
-yum install -q -y git perl-{XML-XPath,Digest-SHA}
-
-# install all available openjdk-devel sets
-yum install -q -y 'java-*-openjdk-devel'
-
-# we currently use Java7 (aka java-1.7.0-openjdk) as our standard make
-# sure that this is the java that alternatives is pointing to, dynamic
-# spin-up scripts can switch to any of the current JREs installed if
-# needed
-alternatives --set java /usr/lib/jvm/jre-1.7.0-openjdk.x86_64/bin/java
-alternatives --set java_sdk_openjdk /usr/lib/jvm/java-1.7.0-openjdk.x86_64
-
-# To handle the prompt style that is expected all over the environment
-# with how use use robotframework we need to make sure that it is
-# consistent for any of the users that are created during dynamic spin
-# ups
-echo 'PS1="[\u@\h \W]> "' >> /etc/skel/.bashrc
-
-# add in mininet, openvswitch, and netopeer
-yum install -q -y netopeer-server-sl CPqD-ofsoftswitch13 mininet \
- telnet openvswitch
-
-# we need semanage installed for some of the next bit
-yum install -q -y policycoreutils-python
-
-# netconf / netopeer needs some special modifications to ssh
-semanage port -a -t ssh_port_t -p tcp '830'
-
-# The default /etc/ssh/sshd_config doesn't actually specify a port as such
-# we need to specify both 22 as well as 830 along with the netconf
-# subsystem
-echo << EOSSH >> /etc/ssh/sshd_config
-
-# Added for netconf / netopeer testing
-Port 22
-Port 830
-Subsystem netconf /usr/bin/netopeer-server-sl
-EOSSH
-
-# cbench installation for running openflow performance tests
-
-OF_DIR=$HOME/openflow # Directory that contains OpenFlow code
-OFLOPS_DIR=$HOME/oflops # Directory that contains oflops repo
-
-yum install -q -y net-snmp-devel libpcap-devel autoconf make automake libtool libconfig-devel
-
-git clone git://gitosis.stanford.edu/openflow.git $OF_DIR &> /dev/null
-git clone https://github.com/andi-bigswitch/oflops.git $OFLOPS_DIR &> /dev/null
-
-cd $OFLOPS_DIR
-./boot.sh &> /dev/null
-./configure --with-openflow-src-dir=$OF_DIR &> /dev/null
-make &> /dev/null
-make install &> /dev/null
+++ /dev/null
-The docker Vagrant can be used to take an already converted Rackspace native
-base image into a usuable docker ready image
-
-Please see the baseline vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'Fedora 20 (Heisenbug) - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE="${baseimagename}" vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSIMAGE="${baseimagename}" RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-integration-robotframework can be used to take an already converted
-Rackspace native base image into a usuable robotframework system for use
-in the OpenDaylight integration testing environment.
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'CentOS 6.5 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-lf-networking is the final overlay that is run on images to make them
-usable as Jenkins slaves in the OpenDaylight or ODLForge environments.
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image or use one of the other vagrant configurations
-(utilizing a convert base image) for the source image.
-
-This vagrant expects (by default) a personal Rackspace image named
-
-'CentOS 6.5 - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimagename}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-This vagrant requires that an environment variable of RSSUBDOMAIN be
-configured so that the networking configuration can be carried out
-properly as the process used makes it difficult at best and impossible
-at worst to detect what the final networking setups should be. This
-needs to be detected before we create the base image due to how
-cloud-init overwrites certain features we're trying to override and we
-therefore 'chattr +i' certain configuration files to keep it from
-breaking things.
-
-RSSUBDOMAIN may be (currently) one of the following options:
-
-dfw.opendaylight.org
-dfw.odlforge.org
-ord.opendaylight.org
-
-NOTE: This vagrant will always execute the resealing operation. This
-operation will intentially destroy current SSH pubkeys installed on the
-system as well as reset log files and network configurations. You have
-been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "dummy"
-
- config.ssh.username = 'root'
-
- config.vm.provider :rackspace do |rs|
- rs.flavor = 'general1-1'
-
- if ENV['RSIMAGE']
- rs.image = ENV['RSIMAGE']
- else
- rs.image = 'CentOS 6.5 - Vagrant ready'
- end
-
- # allow for switching to ORD but default to DFW
- if (ENV['RSREGION'] == 'ord')
- rs.rackspace_region = :ord
- else
- rs.rackspace_region = :dfw
- end
- end
-
- # explicitly configure shared folder syncs
- config.vm.synced_folder '.', '/vagrant', :disabled => true
- config.vm.synced_folder '..', '/vagrant/lib'
-
- # reconfigure the network setup to support our special private setup
- config.vm.provision 'shell', path: 'configure_lf_infra.sh', args: ENV['RSSUBDOMAIN']
-
- # reseal the system for imaging
- config.vm.provision 'shell', path: '../system_reseal.sh'
-end
+++ /dev/null
-# make system modifications to handle being on a private Rackspace network
-
-# lint:ignore:80chars
-notice ("Operating system detected is: '${::operatingsystem} ${::operatingsystemrelease}'")
-# lint:endignore
-notice ("Subdomain being used is: '${::subdomain}'")
-
-# configure nameservers for domains
-case $::subdomain {
- /^dfw\./: {
- $ns1 = '72.3.128.241'
- $ns2 = '72.3.128.240'
- case $::subdomain {
- /opendaylight/: {
- $router = '10.30.11.1'
- }
- /odlforge/: {
- $router = '10.30.12.1'
- }
- default: {
- fail("Unrecognized subdomain ${::subdomain}")
- }
- }
- }
- /^ord\./: {
- $ns1 = '173.203.4.9'
- $ns2 = '173.203.4.8'
- $router = '10.30.32.1'
- }
- default: {
- fail("Unrecognized subdomain ${::subdomain}")
- }
-}
-
-# dnsmasq
-class { 'dnsmasq': }
-
-# Setup dnsmasq special domain handlers
-dnsmasq::conf { 'LF-ns1':
- ensure => present,
- content => 'server=/linux-foundation.org/172.17.192.30',
-}
-
-dnsmasq::conf { 'LF-ns2':
- ensure => present,
- content => 'server=/linux-foundation.org/172.17.192.31',
-}
-
-dnsmasq::conf { 'ODL-ns1':
- ensure => present,
- content => 'server=/opendaylight.org/172.17.192.30',
-}
-
-dnsmasq::conf { 'ODL-ns2':
- ensure => present,
- content => 'server=/opendaylight.org/172.17.192.31',
-}
-
-dnsmasq::conf { 'ODLForge-ns1':
- ensure => present,
- content => 'server=/odlforge.org/172.17.192.30',
-}
-
-dnsmasq::conf { 'ODLForge-ns2':
- ensure => present,
- content => 'server=/odlforge.org/172.17.192.31',
-}
-
-# fix the resolver
-file { '/etc/resolv.conf':
- content => "search ${::subdomain}
-nameserver 127.0.0.1
-nameserver ${ns1}
-nameserver ${ns2}
-options timeout:2
-",
-}
-
-file { '/etc/cloud/cloud.cfg.d/00_lf_resolv.cfg':
- content => "#cloud-config
-
-manage_resolv_conf: true
-
-resolv_conf:
- nameservers: ['127.0.0.1', '${ns1}', '${ns2}']
- searchdomains:
- - ${::subdomain}
- options:
- timeout: 2
-",
-}
-
-file_line { 'add_resolver':
- path => $::operatingsystem ? {
- 'Ubuntu' => '/etc/cloud/cloud.cfg',
- default => '/etc/cloud/cloud.cfg.d/10_rackspace.cfg',
- },
- line => ' - resolv_conf',
- after => ' - update_etc_hosts',
-}
-
-# OS specific configuration
-case $::operatingsystem {
- 'CentOS', 'Fedora', 'RedHat': {
- file { '/etc/sysconfig/network-scripts/route-eth0':
- content => "default via ${router} dev eth0",
- }
-
- # disable the DNS peerage so that our resolv.conf doesn't
- # get destroyed
- file_line { 'disable_peerdns':
- path => '/etc/sysconfig/network',
- line => 'PEERDNS=no',
- }
- }
- 'Ubuntu': {
- file { '/etc/network/if-up.d/0000routing':
- content => "#!/bin/sh\nip route add default via ${router} dev eth0",
- mode => '0755',
- }
-
- file { '/etc/resolvconf/resolv.conf.d/tail':
- content => "# opendaylight dns
-nameserver ${ns1}
-nameserver ${ns2}
-",
- }
- }
- default: {
- notice ("${::operatingsystem} is not supported by this configuration")
- }
-}
+++ /dev/null
-#!/bin/bash
-
-# script requires information about subdomain
-if [ -z "$1" ]; then
- >&2 echo "Please provide the subdomain to Vagrant"
- exit 1
-else
- SUBDOM=$1
-fi
-
-
-all_systems() {
- # install specific versions of puppet modules
- puppet module install puppetlabs-stdlib -v 4.5.1
- puppet module install puppetlabs-concat -v 1.2.0
- #puppet module install lex-dnsmasq -v 2.6.1
- puppet module install saz-dnsmasq -v 1.2.0
-
- # write the subdomain information into a custom facter fact
- mkdir -p /etc/facter/facts.d/
- echo "subdomain=${SUBDOM}" > /etc/facter/facts.d/subdomain.txt
-
- # final bits
- puppet apply /vagrant/lib/lf-networking/confignetwork.pp
-
-}
-
-rh_systems_init() {
- # remove current networking configurations
- rm -f /etc/sysconfig/network-scripts/ifcfg-eth*
-}
-
-rh_systems_post() {
- # don't let cloud-init do funny things to our routing
- chattr +i /etc/sysconfig/network-scripts/route-eth0
-
- # so that the network stack doesn't futz with our resolv config
- # after we've configured it
-# chattr +i /etc/resolv.conf
-}
-
-ubuntu_systems_post() {
- # don't let cloud-init destroy our routing
-# chattr +i /etc/network/if-up.d/0000routing
- echo "---> do nothing for now"
-}
-
-# Execute setup that all systems need
-all_systems
-
-echo "---> Checking distribution"
-FACTER_OSFAMILY=`/usr/bin/facter osfamily`
-FACTER_OS=`/usr/bin/facter operatingsystem`
-case "$FACTER_OSFAMILY" in
- RedHat)
- rh_systems_init
- rh_systems_post
- ;;
- Debian)
- case "$FACTER_OS" in
- Ubuntu)
- echo "---> Ubuntu found"
- ubuntu_systems_post
- ;;
- *)
- "---> Nothing to do for ${FACTER_OS}"
- ;;
- esac
- ;;
- *)
- echo "---> Unknown OS: ${FACTER_OSFAMILY}"
- ;;
-esac
-
-# vim: sw=4 ts=4 sts=4 et :
+++ /dev/null
-#!/bin/bash
-
-# vim: sw=2 ts=2 sts=2 et :
-
-if [ -f /.autorelabel ]; then
- echo "**********************************************"
- echo "* SYSTEM REQUIRES RELABELING SKIPPING RESEAL *"
- echo "* PLEASE RESTART SYSTEM AND RERUN *"
- echo "* PROVISIONING SCRIPTS *"
- echo "**********************************************"
- exit 1;
-fi
-
-rm -rf /etc/Pegasus/*.cnf /etc/Pegasus/*.crt /etc/Pegasus/*.csr /etc/Pegasus/*.pem /etc/Pegasus/*.srl /root/anaconda-ks.cfg /root/anaconda-post.log /root/initial-setup-ks.cfg /root/install.log /root/install.log.syslog /var/cache/fontconfig/* /var/cache/gdm/* /var/cache/man/* /var/lib/AccountService/users/* /var/lib/fprint/* /var/lib/logrotate.status /var/log/*.log* /var/log/BackupPC/LOG /var/log/ConsoleKit/* /var/log/anaconda.syslog /var/log/anaconda/* /var/log/apache2/*_log /var/log/apache2/*_log-* /var/log/apt/* /var/log/aptitude* /var/log/audit/* /var/log/btmp* /var/log/ceph/*.log /var/log/chrony/*.log /var/log/cron* /var/log/cups/*_log /var/log/debug* /var/log/dmesg* /var/log/exim4/* /var/log/faillog* /var/log/gdm/* /var/log/glusterfs/*glusterd.vol.log /var/log/glusterfs/glusterfs.log /var/log/httpd/*log /var/log/installer/* /var/log/jetty/jetty-console.log /var/log/journal/* /var/log/lastlog* /var/log/libvirt/libvirtd.log /var/log/libvirt/lxc/*.log /var/log/libvirt/qemu/*.log /var/log/libvirt/uml/*.log /var/log/lightdm/* /var/log/mail/* /var/log/maillog* /var/log/messages* /var/log/ntp /var/log/ntpstats/* /var/log/ppp/connect-errors /var/log/rhsm/* /var/log/sa/* /var/log/secure* /var/log/setroubleshoot/*.log /var/log/spooler* /var/log/squid/*.log /var/log/syslog* /var/log/tallylog* /var/log/tuned/tuned.log /var/log/wtmp* /var/named/data/named.run
-
-rm -rf ~/.viminfo /etc/ssh/ssh*key* ~/.ssh/* /root/.ssh/* /home/$(logname)/.ssh/*
-
-# kill any cloud-init related bits
-rm -rf /var/lib/cloud/*
-
-# cleanup /vagrant
-rm -rf /vagrant
-
-# Force a system sync and sleep to get around any SSD issues
-echo "Forcing sync and sleep for 10sec"
-sync
-sleep 10
-
-echo "********************************************"
-echo "* PLEASE SNAPSHOT IMAGE AT THIS TIME *"
-echo "********************************************"
+++ /dev/null
-ovsdb-devstack can be used to take an already converted Rackspace native
-base image into a usuable DevStack base image for ODL DevStack testing.
-
-Please see the rackspace-convert-base vagrant setup for creation of the
-needed base image.
-
-This vagrant expects (by default) a personal image named
-
-'Fedora 20 (Heisenbug) - Vagrant ready'
-
-To spin up and utilize.
-
-$ RSIMAGE='${baseimage_name}' vagrant up --provider=rackspace
-
-Will execute this vagrant against a differently named base image
-
-$ RSRESEAL=true vagrant up --provider=rackspace
-
-NOTE: resealing will cause the vagrant to run the resealing operation.
-This operation will intentionally destroy current SSH pubkeys installed
-on the system as well as reset log files and network configurations. You
-have been warned.
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end
+++ /dev/null
-# -*- mode: ruby -*-
-# vi: set ft=ruby sw=2 ts=2 sts=2 et :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- # root off of the openstack provider dummy box
- config.vm.box = "dummy"
- config.ssh.username = 'root'
-
- # make sure to set the following in your
- # ~/.vagrant.d/boxes/dummy/0/openstack/Vagrantfile
- #
- # os.openstack_auth_url
- # os.endpoint_type
- # os.flavor
- # os.tenant_name
- # os.username
- # os.password
- # os.networks
- #
- # If you are not using an SSH token / smartcard also set this
- # os.key_name
- # config.ssh.private_key_path -- set this outside the openstack block
- # in your base box
- config.vm.provider :openstack do |os, override|
- if ENV['BOX']
- override.vm.box = ENV['BOX']
- else
- override.vm.box = 'dummy'
- end
- config.ssh.username = 'centos'
- os.flavor = 'm1.small'
-
- # require an IMAGE to be passed in
- # IMAGE must be a human name and not an image ID!
- if ENV['IMAGE']
- os.image = ENV['IMAGE']
- else
- os.image = 'BAD IMAGE'
- override.ssh.username = 'baduser'
- end
-
- case ENV['IMAGE']
- when /.*ubuntu.*/i
- override.ssh.username = 'ubuntu'
-
- when /.*fedora.*/i
- override.ssh.username = 'fedora'
-
- # take care of the tty requirement by fedora for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
-
- when /.*centos.*/i
- override.ssh.username = 'centos'
-
- # take care of the tty requirement by centos for sudo
- os.user_data = "#!/bin/bash
-/bin/sed -i 's/ requiretty/ !requiretty/' /etc/sudoers;"
- end
- end
-
- # Explicitlly set default shared folder and load lib folder
- config.vm.synced_folder ".", "/vagrant"
- config.vm.synced_folder "../lib/", "/vagrant/lib"
-
- # Do a full system update and enable enforcing if needed
- config.vm.provision 'shell', path: '../lib/baseline.sh'
-
- # run our bootstrapping
- config.vm.provision 'shell', path: 'bootstrap.sh'
-
-
- #################
- # FINAL CLEANUP #
- #################
-
- # set RSRESEAL to... anything if you want to snap an image of this box
- # not setting the environment variable will cause the system to come
- # up fully and not be in a resealable state
- if ENV['RSRESEAL']
- config.vm.provision 'shell', path: '../lib/system_reseal.sh'
- end
-end