--- /dev/null
+---
+- name: Sleep for {{ sleep_time }} seconds to allow stacking to start
+ tags:
+ - openstack
+ - stack
+ - workaround
+ command: sleep {{ sleep_time }}
+
+- name: Get PID of devstack::chown process
+ tags:
+ - openstack
+ - stack
+ - workaround
+ shell: "docker exec -i {{ container_name }} pgrep chown || exit 0"
+ register: chown_pid
+
+- name: Show output of pgrep
+ tags:
+ - openstack
+ - stack
+ - workaround
+ debug:
+ msg: "{{ chown_pid }}"
+ when: chown_pid is defined
+
+- name: Show "kill" command
+ tags:
+ - openstack
+ - stack
+ - workaround
+ debug:
+ msg: "docker exec -d {{ container_name }} kill -15 {{ chown_pid.stdout }}"
+ when: chown_pid.stdout != ""
+
+- name: Kill devstack chown process because it's unnecessary
+  tags:
+    - openstack
+    - stack
+    - workaround
+  # NOTE: the kill line must NOT be wrapped in quotes inside the shell block —
+  # quoted, the shell treats the whole string as a single command name
+  # ("docker exec -d ...: command not found") and the chown is never killed
+  shell: |
+    echo "Sleeping for 5 seconds to allow chown to get stuck"
+    sleep 5
+    docker exec -d {{ container_name }} kill -15 {{ chown_pid.stdout }}
+  when: chown_pid.stdout != ""
+
--- /dev/null
+---
+# Ansible task to pull a docker image from the repository
+# the displayed name must match the image actually pulled below
+# (registry layout is <registry>/s3p/<node_type>:<tag>)
+- name: "Pull docker image for {{ node_type }}-node: {{ docker_registry }}/s3p/{{ node_type }}:{{ container_tag }}"
+  tags:
+    - pull
+    - docker
+    - container
+  docker_image:
+    name: "{{ docker_registry }}/s3p/{{ node_type }}"
+    tag: "{{ container_tag }}"
+    pull: true
+    state: present
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
--- /dev/null
+---
+# file: common/tasks/restart_stacking.yml
+# info: restarts compute nodes
+# to force restart, pass
+# --extra-vars "restart_containers=True"
+- name: Set devstack to 'OFFLINE' mode for compute nodes
+ tags:
+ - openstack
+ - stack
+ - restart
+ shell: |
+ docker exec --user stack "{{ container_name }}" sed -i 's:^# \(OFFLINE=True\):\1:g' /home/stack/compute.odl.local.conf
+ docker exec --user stack "{{ container_name }}" sed -i 's:^# \(RECLONE=False\):\1:g' /home/stack/compute.odl.local.conf
+ when:
+ - stack_in_offline_mode
+ - "'compute' in container_name"
+
+- name: RE-start stacking (devstack) in container "{{ container_name }}"
+ tags:
+ - openstack
+ - stack
+ - restart
+ remote_user: root
+ become: yes
+ become_method: sudo
+ shell: docker exec --detach --user stack "{{ container_name }}" /bin/bash -c /home/stack/restart.sh
+
+- include: kill_chown.yml
+
+# vim: set et sw=2 ts=2 :
+
--- /dev/null
+---
+- name: Set devstack to 'OFFLINE' mode for compute nodes
+ tags:
+ - openstack
+ - stack
+ shell: |
+ docker exec --user stack "{{ container_name }}" sed -i 's:^# \(OFFLINE=True\):\1:g' /home/stack/compute.odl.local.conf
+ docker exec --user stack "{{ container_name }}" sed -i 's:^# \(RECLONE=False\):\1:g' /home/stack/compute.odl.local.conf
+ when:
+ - stack_in_offline_mode
+ - "'compute' in container_name"
+
+- name: Start stacking (devstack)
+ tags:
+ - openstack
+ - stack
+ - start
+ remote_user: root
+ become: yes
+ become_method: sudo
+ shell: docker exec --detach --user stack "{{ container_name }}" /bin/bash -c /home/stack/start.sh
+
+- include: kill_chown.yml
+
+# TODO: have the openstack nodes "check in" when they've finished stacking
+# + maybe with ansible pull mode
+# vim: set et sw=2 ts=2 :
+
--- /dev/null
+#!/bin/bash
+# this will
+# 1) create a pair of veth interfaces
+# 2) add one to a physical bridge (assumed to exist)
+# 3) add the peer to a docker container netns
+# 4) set its IP address
+
+set -e
+
+function fn_get_host_index {
+    # derive the integer host index (rack position) from a hostname like "an11-31-odl"
+    local PHYS_HOST_NAME=${1}
+    # error must go to stderr (stdout is captured by the caller's $(...)) and
+    # exit must be non-zero so 'set -e' aborts the caller; a bare 'exit' here
+    # returned the echo's status (0) and the error text was silently captured
+    # as the host index
+    [ -z "$1" ] && echo "ERROR: a host ID number must be supplied" >&2 && exit 1
+    local __hix=${PHYS_HOST_NAME##"an11-"} # trim leading rack ID from hostname (e.g. an11-31-odl -> 31-odl-perf)
+    local H_IXd=${__hix%%-*} # trim trailing characters after rack position (e.g. 31-odl-perf -> 31)
+    H_IXd=${H_IXd##"0"} # trim leading zeroes
+    echo "$H_IXd"
+}
+
+function fn_link_container_netns {
+ # echo "INFO: linking net namespace of container $CONTAINER_NAME"
+ mkdir -p $HOST_NETNS_ROOT
+ # derived variables
+ SANDBOX_KEY=$(docker inspect -f '{{.NetworkSettings.SandboxKey}}' $CONTAINER_NAME)
+ NETNS_NAME="netns-$CONTAINER_NAME"
+ NETNS_LINK="${HOST_NETNS_ROOT}/${NETNS_NAME}"
+ # unlink the netns if a link already exists
+ if [ -L "$NETNS_LINK" ] ; then
+ unlink $NETNS_LINK
+ fi
+ ln -s $SANDBOX_KEY $NETNS_LINK
+ # ls -al $HOST_NETNS_ROOT
+}
+
+function fn_attach_veth_to_container {
+ ## Attach veth to container
+ # input:
+ # A_IX: adapter index, becomes suffix of veth name in host or ethphys{A_IX} in container
+ # ethphys01 == ethmgmt
+ # ethphys02 == ethdata
+ # NETNS_NAME: netns_link from link_container_netns.yml
+ #
+ # set in main: CONTAINER_VETH_NAME="ethphys${A_IX}"
+
+ ip link set $VETH_CONT netns $NETNS_NAME
+ ip netns exec $NETNS_NAME ip link set dev $VETH_CONT name $CONTAINER_VETH_NAME
+ # set the device mac address
+ ip netns exec $NETNS_NAME ip link set dev $CONTAINER_VETH_NAME address $CONTAINER_MAC
+ # set the adapter IP address
+ ip netns exec $NETNS_NAME ip address add $CONTAINER_IP dev $CONTAINER_VETH_NAME
+ #echo "Container net-namespace contents:"
+ ip netns exec $NETNS_NAME ip link set dev $CONTAINER_VETH_NAME up
+ #ip netns exec $NETNS_NAME ip a s
+ #echo
+}
+
+function fn_create_and_link_veth {
+    ## Create veth pair (peers)
+    VETH_BASE="ve${H_IXx}${C_IXx}${A_IX}"
+    VETH_HOST=${VETH_BASE}h
+    VETH_CONT=${VETH_BASE}c
+    ## remove link from host netns if it already exists
+    # suppress stderr: 'ip link show' prints an error when the device is
+    # absent, which is the expected case on a first run
+    if [ -n "$(ip link show $VETH_HOST 2>/dev/null)" ]; then
+        ip link set dev $VETH_HOST down
+        ip link delete $VETH_HOST
+    fi
+    ## create veth pair
+    ip link add $VETH_HOST type veth peer name $VETH_CONT
+    ip link set dev $VETH_HOST up
+    ## attach veth in host netns to PHYS_BRIDGE
+    brctl addif $PHYS_BRIDGE_NAME $VETH_HOST
+
+    fn_attach_veth_to_container
+}
+
+
+function fn_display_link_status {
+ # if all goes well, we've linked the container to the bridge, update the counter
+ if [ $? -eq 0 ] ; then
+ # display status info
+ echo "Successfully linked container $CONTAINER_NAME to bridge $PHYS_BRIDGE_NAME"
+ echo -e "H_IX: \t${H_IXd} (0x${H_IXx})"
+ echo -e "C_IX: \t${C_IXd} (0x${C_IXx})"
+ echo -e "C_MAC: \t${CONTAINER_MAC}"
+ echo -e "C_IP4: \t${CONTAINER_IP}"
+ echo -e "C_veth:\t${CONTAINER_VETH_NAME} (${VETH_CONT})"
+ echo -e "H_veth:\t${VETH_HOST}"
+ echo
+ fi
+}
+
+# main:
+# lab constants
+# common/vars/main.yml
+MAC_PREFIX="{{ veth_mac_address_prefix }}"
+HOST_NETNS_ROOT="{{ host_netns_root }}"
+NETMASK_LEN="{{ management_subnet_netmask }}"
+
+# parse input arguments
+PHYS_HOST_NAME="${1}"
+CONTAINER_ID_NUMBER="${2}"
+# container name can be constructed from ID num: compute-<hostID>-<CONTAINER_ID_NUMBER>
+CONTAINER_TYPE=${3}
+
+# determine subnet from bridge
+
+# this is deterministic where each container gets an index which is used
+# + to create the IP address, MAC address, VETH numbering, etc
+H_IXd=$(fn_get_host_index $PHYS_HOST_NAME )
+# host index (rack position), convert to 2 hex digits
+# H_IXx (hex representation of host id) can be passed as an input argument or used from the environment
+H_IXx=${H_IXx:-$(printf "%.2x" $H_IXd)}
+SUBNET_SEGMENT="${H_IXd}"
+
+
+# For last octet of IP address:
+# host=1, service=2, network=3, compute=11-200, floatingIP=201-254
+case "$CONTAINER_TYPE" in
+ service)
+ # echo "CONTAINER_TYPE = service"
+ CONTAINER_NAME=service-node
+ CONTAINER_ID_NUMBER=2
+ ;;
+ network)
+ # echo "CONTAINER_TYPE = network"
+ CONTAINER_NAME=network-node
+ CONTAINER_ID_NUMBER=3
+ ;;
+ measure)
+ # echo "CONTAINER_TYPE = measure"
+ CONTAINER_NAME=measure-node
+ CONTAINER_ID_NUMBER=4
+ ;;
+ compute) # echo "CONTAINER_TYPE = compute"
+ CONTAINER_NAME="compute-${H_IXd}-${CONTAINER_ID_NUMBER}"
+ # echo "Compute node # = $CONTAINER_ID_NUMBER"
+ ;;
+ *) echo "ERROR: Invalid CONTAINER_TYPE \"$CONTAINER_TYPE\" specified"
+ exit 1
+esac
+
+# description:
+# input: container type (string), "ID" (int, 11-200) supplied on the command line
+# this script will:
+# 1) link the container to both {{ management_bridge }} and {{ data_bridge }}
+# 2) modify their MAC addresses accordingly
+# 3) supply IP addresses
+
+fn_link_container_netns
+
+C_IXd=$CONTAINER_ID_NUMBER
+C_IXx=$(printf "%.2x" $C_IXd)
+
+# connect the adapter
+for ADAPTER_IX in {1..2}; do
+ A_IX="$(printf "%.2x" $ADAPTER_IX)"
+ case "$ADAPTER_IX" in
+ 1)
+ # create links to the management bridge
+ NETMASK_LEN="{{ management_subnet_netmask }}"
+ PHYS_BRIDGE_NAME="{{ management_bridge }}"
+ SUBNET_BASE="{{ management_subnet_prefix }}"
+ CONTAINER_VETH_NAME="ethmgmt"
+ ;;
+ 2)
+ # create links to the tenant/data bridge
+ NETMASK_LEN="{{ data_subnet_netmask }}"
+ PHYS_BRIDGE_NAME="{{ data_bridge }}"
+ SUBNET_BASE="{{ data_subnet_prefix }}"
+ CONTAINER_VETH_NAME="ethdata"
+ ;;
+ *) echo "ERROR: Invalid ADAPTER_IX \"$ADAPTER_IX\" specified"
+ exit
+ esac
+ SUBNET_PREFIX="${SUBNET_BASE}.${SUBNET_SEGMENT}"
+ # container index (container id per host), convert to 2 hex digits
+ CONTAINER_IP="${SUBNET_PREFIX}.${C_IXd}/${NETMASK_LEN}"
+ CONTAINER_MAC="${MAC_PREFIX}:${H_IXx}:${C_IXx}:${A_IX}"
+
+ # make links
+ fn_create_and_link_veth
+ fn_display_link_status
+done
+
+# echo "You can remove the links created just now by simply removing the veth peer from the root netns with:"
+# echo " ip link delete $VETH_HOST"
+
+unlink $HOST_NETNS_ROOT/$NETNS_NAME
+
+# vim: set ft=sh sw=4 ts=4 et ai :
+
# common variables
---
# network infrastructure
-## lab network resources
+## lab network resources - define in /etc/ansible/hosts
# lab_http_proxy:
# lab_https_proxy:
# infrastructure_server:
# docker_registry_port:
# docker_registry:
+# OpenStack framework config
+# canonical lowercase YAML booleans; Jinja still renders them as "True"/"False"
+stack_in_offline_mode: true
+auto_stack: true
+# run 'restart.sh' instead of 'start.sh' - mutually exclusive with auto_stack
+restart_containers: false
+use_odl_network: true
+# network variables
## network configuration of host machines
mgmt_iface: eno3
data_iface: eno4
management_interface: "{{ mgmt_iface }}"
data_interface: "{{ data_iface }}"
management_subnet_prefix: "10.129"
-mgmt_ip_prefix: "10.129"
management_subnet_netmask: "16"
data_subnet_prefix: "10.130"
-data_ip_prefix: "10.130"
data_subnet_netmask: "16"
+# prefix for veth addresses
+veth_mac_address_prefix: "fe:53:00"
+host_netns_root: "/var/run/netns"
# IP address of the Linux bridge on the physical host
-mgmt_ip: "{{ mgmt_ip_prefix }}.{{ rackpos }}.1/{{ test_netmask }}"
-data_ip: "{{ data_ip_prefix }}.{{ rackpos }}.1/{{ test_netmask }}"
+mgmt_ip: "{{ management_subnet_prefix }}.{{ rackpos }}.1/{{ management_subnet_netmask }}"
+data_ip: "{{ data_subnet_prefix }}.{{ rackpos }}.1/{{ data_subnet_netmask }}"
+# NOTE: WARNING!!! these names must be identical to those in service-node and compute-nodes' local.conf
+mgmt_veth_name: "ethmgmt"
+data_veth_name: "ethdata"
# lab/default IP addresses of physical hosts
-mgmt_lab_ip_prefix: "10.11.26"
-data_lab_ip_prefix: "10.11.126"
lab_netmask: 22
# position and rackpos are an integer "index" of physical machines
position: "{{ rackpos }}"
mgmt_lab_ip: "{{ mgmt_lab_ip_prefix }}.{{ rackpos }}/{{ lab_netmask }}"
data_lab_ip: "{{ data_lab_ip_prefix }}.{{ rackpos }}/{{ lab_netmask }}"
-# node operating system defaults
-# Variables for emulated framework containers
-## container configuration
-docker_systemd_version: "v0.1"
-compute_image: "s3p/compute"
-compute_version: "v0.5s"
-measure_image: "s3p/measure"
-measure_version: "v0.1.1"
-service_image: "s3p/service"
-service_version: "v0.4sc"
-control_node_image: "{{ docker_registry }}/{{ service_image }}:{{ service_version }}"
-
## cluster configuration
### the service_host ip should be conform to infrastructure routing
#### service_host_phys_host is the host_index for the physical server
-service_host_phys_host: "20"
service_host_container_index: "2"
service_host_mgmt_ip: "{{ management_subnet_prefix }}.{{ service_host_phys_host }}.{{ service_host_container_index }}"
control_node_container_name: "service-node"
# network parameters for service and compute nodes
lab_no_proxy: "localhost,10.0.0.0/8,192.168.0.0/16,172.17.0.0/16,127.0.0.1,127.0.0.0/8,{{ service_host_mgmt_ip }}"
-# OpenStack framework config
-use_odl_network: "True"
+
+# docker configuration variables
+## container configuration
+service_image: "s3p/service"
+compute_image: "s3p/compute"
+measure_image: "s3p/measure"
+docker_systemd_version: "v0.1"
+compute_version: "v0.4.1s"
+measure_version: "v0.1.1"
+service_version: "v0.4s"
+# service_version: "v0.4.1sb4"
+ # v0.4sb4
+ # v0.4.1sc
+control_node_image: "{{ docker_registry }}/{{ service_image }}:{{ service_version }}"
+compute_node_image: "{{ docker_registry }}/{{ compute_image }}:{{ compute_version }}"
+sleep_time: 10
+# vim: set et ai sw=2 ts=2 ft=yaml :
+
--- /dev/null
+---
+- include: pull_compute.yml
+
+# NOTE: 'register' is not valid on an include and was silently ignored here;
+# run_compute.yml registers compute_node_spawn_result itself
+- include: run_compute.yml
+
+- name: Display compute node container name
+  debug:
+    msg: "Compute node {{ container_name }} successfully spawned"
+    verbosity: 1
+  when: compute_node_spawn_result is defined
+
+- name: Start Stacking compute node
+  tags:
+    - compute
+    - openstack
+    - stack
+  include: ../../common/tasks/start_stacking.yml
+  # 'when' already runs through Jinja; bare variables only (no "{{ }}")
+  when:
+    - auto_stack
+
+- name: REstart stacking in compute nodes
+  tags:
+    - compute
+    - openstack
+    - restart
+  include: ../../common/tasks/restart_stacking.yml
+  when:
+    - restart_containers
+
+# vim: set et sw=2 ts=2 :
+
--- /dev/null
+---
+- name: Pull compute-node docker image from the registry
+ tags:
+ - pull
+ - docker
+ - containers
+ vars:
+ node_type: compute
+ container_tag: "{{ compute_version }}"
+ include: ../../common/tasks/pull_image.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
--- /dev/null
+---
+- name: Print debug info
+ tags:
+ - compute
+ - debug
+ debug:
+ msg: "Spawning compute node '{{ container_name }}' with COMP_ID={{ COMP_ID }}"
+# verbosity: 1
+
+- name: Spawn compute node
+ remote_user: root
+ become: yes
+ become_method: sudo
+ tags:
+ - spawn
+ - compute
+ - containers
+ - docker
+ docker_container:
+ name: "{{ container_name }}"
+ hostname: "{{ container_name }}"
+ image: "{{ compute_container_image }}"
+ state: started
+ privileged: yes
+ capabilities: ALL
+ volumes:
+ - /dev:/dev
+ - /lib/modules:/lib/modules
+ - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ published_ports:
+ - "{{ web_port_map }}"
+ stop_signal: SIGRTMIN+3
+ security_opts:
+ - "apparmor:docker-unconfined"
+ env:
+ TZ: "America/Los_Angeles"
+ ODL_NETWORK: "{{ use_odl_network }}"
+ SERVICE_HOST: "{{ service_host_mgmt_ip }}"
+ container: "docker"
+ http_proxy: "{{ lab_http_proxy }}"
+ https_proxy: "{{ lab_https_proxy }}"
+ no_proxy: "{{ lab_no_proxy }}"
+ TERM: screen
+ register: compute_node_spawn_result
+
+- name: Connect container to network with ansible (idempotent)
+ tags:
+ - network
+ - compute
+ - experimental
+ include_role:
+ name: network
+
+# vim: set et sw=2 ts=2 :
--- /dev/null
+---
+# rackpos is a parameter of the physical hosts == position
+# COMP_ID must be passed as an 'extra-var'
+# like 'ansible-playbook ... --extra-vars "COMP_ID=11" '
+node_type: compute
+container_name: "compute-{{ rackpos }}-{{ COMP_ID }}"
+compute_container_image: "{{ docker_registry }}/{{ compute_image }}:{{ compute_version }}"
+web_port_map: "600{{ COMP_ID }}:80"
+cpuset_cpu1: "{{ COMP_ID }}"
+cpu2_offset: "{{ (ansible_processor_cores * ansible_processor_threads_per_core)|int }}"
+cpuset_cpu2: "{{ (COMP_ID)|int + (cpu2_offset)|int }}"
+cpuset: "{{ cpuset_cpu1 }},{{ cpuset_cpu2 }}"
+
--- /dev/null
+---
+- hosts: cluster0
+ roles:
+ - common
+ - network
--- /dev/null
+---
+- hosts: cluster0
+ become: yes
+ vars:
+ # name_filter may be supplied as a variable or
+ # on the command line with:
+ # --extra-vars "name_filter=compute*"
+ name_filter: 'compute*'
+ tasks:
+ - include: ../infra/tasks/purge_containers.yml
+++ /dev/null
----
-- hosts: compute-node-hosts
- become: yes
- roles:
- - role: infra
--- /dev/null
+---
+- hosts: cluster0
+ roles:
+ - common
+ - infra
--- /dev/null
+---
+- hosts: compute-node-hosts
+ become: yes
+ vars:
+ # COMP_ID must be supplied as a variable or
+ # on the command line with:
+ # --extra-vars "COMP_ID=(11-250)"
+ COMP_ID: 11
+ roles:
+ - common
+ - compute
--- /dev/null
+---
+- hosts: compute-node-hosts
+ become: yes
+ # COMP_ID must be supplied as a variable or
+ # on the command line with:
+ # --extra-vars "COMP_ID=(11-250)"
+ roles:
+ - common
+ - compute
--- /dev/null
+---
+- hosts: service-node-hosts
+ become: yes
+ roles:
+ - common
+ - service
+
---
-- include: parse_apparmor_profile.yml
+- name: Parse apparmor profile
+ tags:
+ - files
+ - docker
+ shell: apparmor_parser -r -W "{{ profile_path }}"
+ listen: parse apparmor profile
- name: restart docker
tags: docker
---
+- name: Show interface info
+ tags:
+ - infra
+ - network
+ debug:
+ msg: "mgmt_lab_ip {{ mgmt_lab_ip }}, mgmt_ip {{ mgmt_ip }}, interface {{ management_interface}} iface {{ mgmt_iface }}"
+
- name: Setup mgmt bridge
+ tags:
+ - infra
+ - network
vars:
interface: "{{ management_interface }}"
bridge: "{{ management_bridge }}"
when: "'{{ management_bridge }}' not in ansible_interfaces"
- name: Setup data bridge
+ tags:
+ - infra
+ - network
vars:
interface: "{{ data_interface }}"
bridge: "{{ data_bridge }}"
- install
action: apt pkg={{item}} state=installed
with_items:
+ - bridge-utils
- git
- sshpass
- openssh-server
---
+- include: install_packages.yml
- include: copy_apparmor_profile.yml
+- name: Show interface info
+ tags:
+ - infra
+ - network
+ debug:
+ msg: "mgmt_lab_ip {{ mgmt_lab_ip }}, mgmt_ip {{ mgmt_ip }}, interface {{ management_interface}} iface {{ mgmt_iface }}"
- include: create_bridges.yml
-- include: install_packages.yml
- include: setup_docker_daemon.yml
# vim: set et ts=2 sw=2 ai ft=yaml :
--- /dev/null
+---
+- name: Get list of running containers
+ tags:
+ - cleanup
+ - status
+ shell: "docker ps --quiet --all --filter \"name={{ name_filter }}\" "
+ register: container_list
+
+- name: Print list of containers matching name_filter
+ tags:
+ - cleanup
+ - status
+ debug:
+ msg: "Containers matching '{{ name_filter }}' = {{ container_list.stdout }}"
+ when: container_list.stdout != ""
+
+- name: Remove containers matching filter
+ tags:
+ - cleanup
+ - purge
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ container_list.stdout_lines }}"
+ when: container_list.stdout != ""
--- /dev/null
+---
+# task to create a veth-link pair between a bridge and a container
+# See https://github.com/intel-odl/socets/blob/master/ansible/connect_container_to_networks.sh
+
+- name: Set host and container index info
+ tags:
+ - container
+ - network
+ set_fact:
+ host_ix_hex: "{{ '%02x'|format(HOST_ID|int) }}"
+ comp_ix_hex: "{{ '%02x'|format(COMP_ID|int) }}"
+
+- name: Set veth pair base name
+  tags:
+    - container
+    - network
+  set_fact:
+    veth_base: "ve{{ host_ix_hex }}{{ comp_ix_hex }}"
+    # compare against the configured bridge variable rather than the hardcoded
+    # literal 'br_mgmt', so renaming the management bridge keeps adapter
+    # numbering correct (01 = management, 02 = data)
+    adapter_ix: "{% if bridge_name == management_bridge %}01{% else %}02{% endif %}"
+
+- name: Set host and container veth names
+ tags:
+ - container
+ - network
+ set_fact:
+ veth_host: "{{ veth_base }}{{ adapter_ix }}h"
+ veth_cont: "{{ veth_base }}{{ adapter_ix }}c"
+ adapter_ix_hex: "{{ '%02x'|format(adapter_ix|int) }}"
+
+- name: Delete existing veth links
+ tags:
+ - container
+ - network
+ - cleanup
+ shell: "ip link delete {{ veth_host }} || exit 0"
+
+- name: Get veth link status info for host
+ tags:
+ - container
+ - network
+ command: ip -o link show type veth
+ register: ip_link_status
+
+- name: Create veth pair for {{ bridge_name }} bridge
+ tags:
+ - container
+ - network
+ command: "ip link add {{ veth_host }} type veth peer name {{ veth_cont }}"
+ when: veth_host not in ip_link_status.stdout
+
+# if the veth_cont link already exists, it should be in the container
+- name: Set veth_host up
+ tags:
+ - container
+ - network
+ command: "ip link set dev {{ veth_host }} up"
+ when: veth_host not in ip_link_status.stdout
+
+# assume that the bridge already exists, setup by infra role
+- name: Get bridge status for {{ bridge_name }}
+ tags:
+ - container
+ - network
+ command: "brctl show {{ bridge_name }}"
+ register: bridge_status
+
+- name: Bind veth link to {{ bridge_name }} bridge
+ tags:
+ - container
+ - network
+ command: "brctl addif {{ bridge_name }} {{ veth_host }}"
+ when: veth_host not in bridge_status.stdout
+
+- name: Get name of veth adapter IN-side container
+  tags:
+    - container
+    - network
+  set_fact:
+    # select management vs data parameters by comparing against the configured
+    # bridge variable instead of the hardcoded literal 'br_mgmt'
+    container_veth_name: "{% if bridge_name == management_bridge %}{{ mgmt_veth_name }}{% else %}{{ data_veth_name }}{% endif %}"
+    subnet_prefix: "{% if bridge_name == management_bridge %}{{ management_subnet_prefix }}{% else %}{{ data_subnet_prefix }}{% endif %}"
+    netmask: "{% if bridge_name == management_bridge %}{{ management_subnet_netmask }}{% else %}{{ data_subnet_netmask }}{% endif %}"
+
+- name: Determine container interface IP for {{ bridge_name }}
+ tags:
+ - container
+ - network
+ set_fact:
+ container_ip: "{{ subnet_prefix }}.{{ HOST_ID }}.{{ COMP_ID }}/{{ netmask }}"
+ container_mac: "{{ veth_mac_address_prefix }}:{{ host_ix_hex }}:{{ comp_ix_hex }}:{{ adapter_ix_hex }}"
+
+- name: Check for veth adapter presence in container (docker exec)
+ tags:
+ - container
+ - network
+ command: docker exec {{ container_name }} ip link show
+ register: container_link_status
+
+- name: Show container adapter info
+ tags:
+ - container
+ - network
+ - debug
+ debug:
+ msg: "Container name: {{ container_name }},
+ veth_host: {{ veth_host }},
+ veth_cont: {{ veth_cont }},
+ container_ip: {{ container_ip }},
+ container_mac: {{ container_mac }},
+ container_veth_name: {{ container_veth_name }},
+ container_netns_link: {{ netns_link }}
+ container_netns_name: {{ netns_name }}"
+ verbosity: 3
+
+- name: Attach veth adapter to container
+ tags:
+ - container
+ - network
+ command: ip link set {{ veth_cont }} netns {{ netns_name }}
+ when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter name
+ tags:
+ - container
+ - network
+ command: ip netns exec {{ netns_name }} ip link set dev {{ veth_cont }} name {{ container_veth_name }}
+ when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter L2 HW address
+ tags:
+ - container
+ - network
+ command: ip netns exec {{ netns_name }} ip link set dev {{ container_veth_name }} address {{ container_mac }}
+ when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter L3 IP address
+ tags:
+ - container
+ - network
+ command: ip netns exec {{ netns_name }} ip address add {{ container_ip }} dev {{ container_veth_name }}
+ when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter 'up'
+ tags:
+ - container
+ - network
+ command: ip netns exec {{ netns_name }} ip link set dev {{ container_veth_name }} up
+ when: container_veth_name not in container_link_status.stdout
+
+# vim: set et ai ts=2 sw=2 sts=2 :
+
--- /dev/null
+---
+# container_name must be passed on the command line,
+# e.g. '--extra-vars "container_name=compute-5-11"'
+- name: Get SandboxKey for container
+  tags:
+    - container
+    - network
+  # a 'vars:' key containing only a comment parses as null (invalid vars
+  # mapping) — the note belongs in a comment, not an empty vars block
+  command: "docker inspect --format {% raw %} '{{.NetworkSettings.SandboxKey}}' {% endraw %} {{ container_name }}"
+  register: sandbox_key
+
+- name: Print sandbox_key
+ tags:
+ - container
+ - network
+ - debug
+ debug:
+ msg: "Container '{{ container_name }}' has SandboxKey '{{ sandbox_key.stdout }}'"
+ verbosity: 2
+
+- name: Ensure host netns directory exists
+ tags:
+ - container
+ - network
+ file:
+ path: "{{ host_netns_root }}"
+ state: directory
+ mode: 0755
+
+- name: Set_fact netns_name
+ tags:
+ - container
+ - network
+ set_fact:
+ netns_name: "netns-{{ container_name }}"
+
+- name: Set_fact container_netns link name
+ tags:
+ - container
+ - network
+ set_fact:
+ netns_link: "{{ host_netns_root }}/{{ netns_name }}"
+
+- name: Remove netns link if it already exists
+ tags:
+ - container
+ - network
+ - cleanup
+ file:
+ path: "{{ netns_link }}"
+ state: absent
+ force: yes
+
+- name: Link container netns to host netns
+ tags:
+ - container
+ - network
+ file:
+ src: "{{ sandbox_key.stdout }}"
+ path: "{{ netns_link }}"
+ state: link
+ force: yes
+
--- /dev/null
+---
+# task to link a container to a bridge
+# NOTE: 'register' is not valid on an include and was being ignored;
+# link_container_netns.yml sets the netns_name/netns_link facts used below
+- include: link_container_netns.yml
+
+- include: add_veth_links_to_container.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
--- /dev/null
+---
+# - name: Set_facts for linking containers to bridges
+# set_fact:
+# HOST_ID: "{{ rackpos }}"
+# container_name: "compute-{{ HOST_ID }}-{{ COMP_ID }}"
+# container_netns_name: "netns-{{ container_name }}"
+# # third octet of ip address for containers & host bridge
+# SUBNET_SEGMENT: "{{ rackpos }}"
+# # H_IXd == integer host index (within rack/datacenter)
+# H_IXd: "{{ rackpos }}"
+# # H_IXx == H_IXd as a hex number for mac addresses
+# # C_IX == Container index within host (e.g. 11)
+# C_IX: "{{ COMP_ID }}"
+
+- name: "Link container to management bridge"
+ vars:
+ bridge_name: "{{ management_bridge }}"
+ include: link_to_bridge.yml
+
+- name: "Link container to data bridge"
+ vars:
+ bridge_name: "{{ data_bridge }}"
+ include: link_to_bridge.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
--- /dev/null
+---
+HOST_ID: "{{ rackpos }}"
+container_netns_name: "netns-{{ container_name }}"
+
+# third octet of ip address for containers & host bridge
+SUBNET_SEGMENT: "{{ rackpos }}"
+
+# H_IXd == integer host index (within rack/datacenter)
+H_IXd: "{{ rackpos }}"
+
+# H_IXx == H_IXd as a hex number for mac addresses
+# C_IX == Container index within host (e.g. 11)
+C_IX: "{{ COMP_ID }}"
--- /dev/null
+[all:vars]
+lab_http_proxy=""
+lab_https_proxy=""
+mgmt_lab_ip_prefix="10.20.30"
+data_lab_ip_prefix="10.20.31"
+infrastructure_server="odl-registry"
+registry_ip_address="10.20.32.1"
+docker_registry_port=4000
+docker_registry="{{ infrastructure_server }}:{{ docker_registry_port }}"
+service_host_phys_host="20"
+
+[cluster0:children]
+service-node-hosts
+compute-node-hosts
+
+[compute-node-hosts]
+pod1-05 ansible_host=10.20.30.5 rackpos=5
+pod1-06 ansible_host=10.20.30.6 rackpos=6
+pod1-08 ansible_host=10.20.30.8 rackpos=8
+pod1-09 ansible_host=10.20.30.9 rackpos=9
+pod1-10 ansible_host=10.20.30.10 rackpos=10
+
+[service-node-hosts]
+pod1-20 ansible_host=10.20.30.20 rackpos=20
+
+# vim: set et ai sw=2 ts=2 ft=dosini :
+
--- /dev/null
+---
+- include: pull_service.yml
+
+- include: run_service.yml
+
+- name: Display control node status
+ debug:
+ msg: "Control node successfully spawned with status: '{{ control_node_spawn_result }}'"
+ verbosity: 1
+
+- name: Start Stacking service node
+  tags:
+    - service
+    - openstack
+    - stack
+  include: ../../common/tasks/start_stacking.yml
+  # 'when' already runs through Jinja; bare variables only (no "{{ }}")
+  when:
+    - control_node_spawn_result is defined
+    - auto_stack
+    # NOTE(review): the literal 'false' below disables this task entirely —
+    # presumably a deliberate kill-switch; remove it to re-enable stacking
+    - false
+
+# vim: set et sw=2 ts=2 :
+
--- /dev/null
+---
+- name: Pull service-node docker image from the registry
+ tags:
+ - pull
+ - docker
+ - containers
+ - service
+ vars:
+ node_type: service
+ container_tag: "{{ service_version }}"
+ include: ../../common/tasks/pull_image.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
--- /dev/null
+---
+- name: Print debug info
+ tags:
+ - service
+ - debug
+ debug:
+ msg: "Spawning service node '{{ container_name }}' with COMP_ID={{ COMP_ID }}"
+ verbosity: 2
+
+- name: Spawn Service/control node
+ remote_user: root
+ become: yes
+ become_method: sudo
+ tags:
+ - spawn
+ - service
+ - containers
+ - docker
+ docker_container:
+ name: "{{ control_node_container_name }}"
+ hostname: "{{ control_node_container_name }}"
+ image: "{{ control_node_image }}"
+ state: started
+ privileged: yes
+ capabilities: ALL
+ volumes:
+ - /dev:/dev
+ - /lib/modules:/lib/modules
+ - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ published_ports:
+ - 50080:80
+ - 58181:8181
+ - 56080:6080
+ - 58000:8000
+ stop_signal: SIGRTMIN+3
+ security_opts:
+ - "apparmor:docker-unconfined"
+ env:
+ TZ: "America/Los_Angeles"
+ JAVA_HOME: "/usr/lib/jvm/java-8-openjdk-amd64"
+ JAVA_MAX_MEM: "16g"
+ ODL_NETWORK: "{{ use_odl_network }}"
+ SERVICE_HOST: "{{ service_host_mgmt_ip }}"
+ container: "docker"
+ http_proxy: "{{ lab_http_proxy }}"
+ https_proxy: "{{ lab_https_proxy }}"
+ no_proxy: "{{ lab_no_proxy }}"
+ TERM: screen
+ register: control_node_spawn_result
+
+- name: Connect container to network with ansible (idempotent)
+ tags:
+ - network
+ - service
+ - experimental
+ include_role:
+ name: network
+ vars:
+ COMP_ID: 2
+
+# vim: set et sw=2 ts=2 :
--- /dev/null
+---
+# Variables for emulated framework containers
+container_name: "{{ control_node_container_name }}"
+node_type: service
+COMP_ID: 2
---
# file: site.yml
-- include: infrastructure.yml
+# setup framework infrastructure
+- include: examples/example_setup_infrastructure.yml
+# spawn a service node
+- include: examples/example_start_service_node.yml
+# spawn a compute node with COMP_ID=11
+- include: examples/example_spawn_compute_node_11.yml
+
# vim: set et ai sw=2 ts=2 :