Spawn nodes & connect network with ansible roles 17/62717/4
authorMatt Welch <matt.welch@intel.com>
Tue, 5 Sep 2017 15:14:10 +0000 (11:14 -0400)
committerMatt Welch <matt.welch@intel.com>
Tue, 5 Sep 2017 19:09:48 +0000 (15:09 -0400)
Refactor for better task encapsulation
Add kill_chown.yml to kill stuck chown processes.
Add restart_stacking.yml to restart compute nodes.
Move additional lab-specific variables into inventory.
Update tags to enable more granular task selection.
Modify container networking to use ansible instead of the bash script.
Add ansible tasks to pull container images from the registry.
Add tasks to purge containers matching a supplied string.
Modify order of infra tasks to install packages first, including
bridge-utils.
Add sample hosts file.
Update site.yml to include a complete cloud setup.
Change tabs to spaces for SpaceConsistencyBear.

Enable roles to spawn compute and service nodes
Common task added to start stacking on a compute or service node.
New common variable auto_stack added to control stacking after spawn.
Example ansible scripts to set up infra, spawn compute, & spawn service.
Compute and service roles spawn docker containers & connect them to the
network with a bash script.
Handler for apparmor parser moved to handlers/main.yml.
Site.yml now starts a service node by default.
Remove EOL whitespace.

Change-Id: I892afc76fffdcd85e399c5d2596eb4e5020cc318
Signed-off-by: Matt Welch <matt.welch@intel.com>
33 files changed:
roles/common/tasks/kill_chown.yml [new file with mode: 0644]
roles/common/tasks/pull_image.yml [new file with mode: 0644]
roles/common/tasks/restart_stacking.yml [new file with mode: 0644]
roles/common/tasks/start_stacking.yml [new file with mode: 0644]
roles/common/templates/connect_container_to_network.sh.j2 [new file with mode: 0644]
roles/common/vars/main.yml
roles/compute/tasks/main.yml [new file with mode: 0644]
roles/compute/tasks/pull_compute.yml [new file with mode: 0644]
roles/compute/tasks/run_compute.yml [new file with mode: 0644]
roles/compute/vars/main.yml [new file with mode: 0644]
roles/examples/example_connect_container_to_network.yml [new file with mode: 0644]
roles/examples/example_purge_containers.yml [new file with mode: 0644]
roles/examples/example_setup_infra.yml [deleted file]
roles/examples/example_setup_infrastructure.yml [new file with mode: 0644]
roles/examples/example_spawn_compute_node_11.yml [new file with mode: 0644]
roles/examples/example_spawn_compute_node_COMP_ID.yml [new file with mode: 0644]
roles/examples/example_start_service_node.yml [new file with mode: 0644]
roles/infra/handlers/main.yml
roles/infra/tasks/create_bridges.yml
roles/infra/tasks/install_packages.yml
roles/infra/tasks/main.yml
roles/infra/tasks/purge_containers.yml [new file with mode: 0644]
roles/network/tasks/add_veth_links_to_container.yml [new file with mode: 0644]
roles/network/tasks/link_container_netns.yml [new file with mode: 0644]
roles/network/tasks/link_to_bridge.yml [new file with mode: 0644]
roles/network/tasks/main.yml [new file with mode: 0644]
roles/network/vars/main.yml [new file with mode: 0644]
roles/sample_hosts_file [new file with mode: 0644]
roles/service/tasks/main.yml [new file with mode: 0644]
roles/service/tasks/pull_service.yml [new file with mode: 0644]
roles/service/tasks/run_service.yml [new file with mode: 0644]
roles/service/vars/main.yml [new file with mode: 0644]
roles/site.yml
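
The tasks added here carry tags (pull, docker, stack, network, cleanup, workaround, ...), so subsets of the work can be selected at run time. A couple of illustrative invocations against the sample inventory shipped with this change (paths and tag choices are examples only, not requirements):

    # only pull the container images
    ansible-playbook -i roles/sample_hosts_file roles/site.yml --tags "pull,docker"
    # run everything except the chown workaround
    ansible-playbook -i roles/sample_hosts_file roles/site.yml --skip-tags "workaround"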

diff --git a/roles/common/tasks/kill_chown.yml b/roles/common/tasks/kill_chown.yml
new file mode 100644 (file)
index 0000000..da4ca34
--- /dev/null
@@ -0,0 +1,45 @@
+---
+- name: Sleep for {{ sleep_time }} seconds to allow stacking to start
+  tags:
+    - openstack
+    - stack
+    - workaround
+  command: sleep {{ sleep_time }}
+
+- name: Get PID of devstack::chown process
+  tags:
+    - openstack
+    - stack
+    - workaround
+  shell: "docker exec -i {{ container_name }} pgrep chown || exit 0"
+  register: chown_pid
+
+- name: Show output of pgrep
+  tags:
+    - openstack
+    - stack
+    - workaround
+  debug:
+    msg: "{{ chown_pid }}"
+  when: chown_pid is defined
+
+- name: Show "kill" command
+  tags:
+    - openstack
+    - stack
+    - workaround
+  debug:
+    msg: "docker exec -d {{ container_name }} kill -15 {{ chown_pid.stdout }}"
+  when: chown_pid.stdout != ""
+
+- name: Kill devstack chown process because it's unnecessary
+  tags:
+    - openstack
+    - stack
+    - workaround
+  shell: |
+    echo "Sleeping for 5 seconds to allow chown to get stuck"
+    sleep 5
+    "docker exec -d {{ container_name }} kill -15 {{ chown_pid.stdout }}"
+  when: chown_pid.stdout != ""
+
diff --git a/roles/common/tasks/pull_image.yml b/roles/common/tasks/pull_image.yml
new file mode 100644 (file)
index 0000000..a3e20a6
--- /dev/null
@@ -0,0 +1,15 @@
+---
+# Ansible task to pull a docker image from the repository
+- name: "Pull docker image for {{ node_type }}-node: {{ docker_registry }}/{{ node_type }}:{{ container_tag }}"
+  tags:
+    - pull
+    - docker
+    - container
+  docker_image:
+    name: "{{ docker_registry }}/s3p/{{ node_type }}"
+    tag: "{{ container_tag }}"
+    pull: True
+    state: present
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
diff --git a/roles/common/tasks/restart_stacking.yml b/roles/common/tasks/restart_stacking.yml
new file mode 100644 (file)
index 0000000..54a77b8
--- /dev/null
@@ -0,0 +1,31 @@
+---
+# file: common/tasks/restart_stacking.yml
+# info: restarts compute nodes
+#       to force restart, pass
+#       --extra-vars "restart_containers=True"
+- name: Set devstack to 'OFFLINE' mode for compute nodes
+  tags:
+    - openstack
+    - stack
+    - restart
+  shell: |
+    docker exec --user stack "{{ container_name }}" sed -i 's:^# \(OFFLINE=True\):\1:g' /home/stack/compute.odl.local.conf
+    docker exec --user stack "{{ container_name }}" sed -i 's:^# \(RECLONE=False\):\1:g' /home/stack/compute.odl.local.conf
+  when:
+    - stack_in_offline_mode
+    - "'compute' in container_name"
+
+- name: RE-start stacking (devstack) in container "{{ container_name }}"
+  tags:
+    - openstack
+    - stack
+    - restart
+  remote_user: root
+  become: yes
+  become_method: sudo
+  shell: docker exec --detach --user stack "{{ container_name }}" /bin/bash -c /home/stack/restart.sh
+
+- include: kill_chown.yml
+
+# vim: set et sw=2 ts=2 :
+
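As noted in the header comment above, a forced restart can be requested per run via extra-vars; a possible invocation against the sample inventory (paths are illustrative) would be:

    ansible-playbook -i roles/sample_hosts_file roles/examples/example_spawn_compute_node_11.yml \
        --extra-vars "restart_containers=True auto_stack=False"
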
diff --git a/roles/common/tasks/start_stacking.yml b/roles/common/tasks/start_stacking.yml
new file mode 100644 (file)
index 0000000..0947c14
--- /dev/null
@@ -0,0 +1,28 @@
+---
+- name: Set devstack to 'OFFLINE' mode for compute nodes
+  tags:
+    - openstack
+    - stack
+  shell: |
+    docker exec --user stack "{{ container_name }}" sed -i 's:^# \(OFFLINE=True\):\1:g' /home/stack/compute.odl.local.conf
+    docker exec --user stack "{{ container_name }}" sed -i 's:^# \(RECLONE=False\):\1:g' /home/stack/compute.odl.local.conf
+  when:
+    - stack_in_offline_mode
+    - "'compute' in container_name"
+
+- name: Start stacking (devstack)
+  tags:
+    - openstack
+    - stack
+    - start
+  remote_user: root
+  become: yes
+  become_method: sudo
+  shell: docker exec --detach --user stack "{{ container_name }}" /bin/bash -c /home/stack/start.sh
+
+- include: kill_chown.yml
+
+# TODO: have the openstack nodes "check in" when they've finished stacking
+#       + maybe with ansible pull mode
+# vim: set et sw=2 ts=2 :
+
diff --git a/roles/common/templates/connect_container_to_network.sh.j2 b/roles/common/templates/connect_container_to_network.sh.j2
new file mode 100644 (file)
index 0000000..0fdbdf3
--- /dev/null
@@ -0,0 +1,190 @@
+#!/bin/bash
+# this will
+# 1) create a pair of veth interfaces
+# 2) add one to a physical bridge (assumed to exist)
+# 3) add the peer to a docker container netns
+# 4) set its IP address
+
+set -e
+
+function fn_get_host_index {
+    local PHYS_HOST_NAME=${1}
+    [ -z "$1" ] && echo "ERROR: a host ID number must be supplied" && exit
+    local __hix=${PHYS_HOST_NAME##"an11-"} # trim leading rack ID from hostname (e.g. an11-31-odl-perf -> 31-odl-perf)
+    local H_IXd=${__hix%%-*} # trim trailing characters after rack position (e.g. 31-odl-perf -> 31)
+    H_IXd=${H_IXd##"0"} # trim leading zeroes
+    echo "$H_IXd"
+}
+
+function fn_link_container_netns {
+    # echo "INFO: linking net namespace of container $CONTAINER_NAME"
+    mkdir -p $HOST_NETNS_ROOT
+    # derived variables
+    SANDBOX_KEY=$(docker inspect -f '{% raw %}{{.NetworkSettings.SandboxKey}}{% endraw %}' $CONTAINER_NAME)
+    NETNS_NAME="netns-$CONTAINER_NAME"
+    NETNS_LINK="${HOST_NETNS_ROOT}/${NETNS_NAME}"
+    # unlink the netns if a link already exists
+    if [ -L "$NETNS_LINK" ] ; then
+        unlink $NETNS_LINK
+    fi
+    ln -s $SANDBOX_KEY $NETNS_LINK
+    # ls -al $HOST_NETNS_ROOT
+}
+
+function fn_attach_veth_to_container {
+    ## Attach veth to container
+    # input:
+    # A_IX: adapter index, becomes suffix of veth name in host or ethphys{A_IX} in container
+    # ethphys01 == ethmgmt
+    # ethphys02 == ethdata
+    # NETNS_NAME: netns_link from link_container_netns.yml
+    #
+    # set in main: CONTAINER_VETH_NAME="ethphys${A_IX}"
+
+    ip link set $VETH_CONT netns $NETNS_NAME
+    ip netns exec $NETNS_NAME ip link set dev $VETH_CONT name $CONTAINER_VETH_NAME
+    # set the device mac address
+    ip netns exec $NETNS_NAME ip link set dev $CONTAINER_VETH_NAME address $CONTAINER_MAC
+    # set the adapter IP address
+    ip netns exec $NETNS_NAME ip address add $CONTAINER_IP dev $CONTAINER_VETH_NAME
+    #echo "Container net-namespace contents:"
+    ip netns exec $NETNS_NAME ip link set dev $CONTAINER_VETH_NAME up
+    #ip netns exec $NETNS_NAME ip a s
+    #echo
+}
+
+function fn_create_and_link_veth {
+    ## Create veth pair (peers)
+    VETH_BASE="ve${H_IXx}${C_IXx}${A_IX}"
+    VETH_HOST=${VETH_BASE}h
+    VETH_CONT=${VETH_BASE}c
+    ## remove link from host netns if it already exists
+    if [ -n "$(ip link show $VETH_HOST)" ]; then
+        ip link set dev $VETH_HOST down
+        ip link delete $VETH_HOST
+    fi
+    ## create veth pair
+    ip link add $VETH_HOST type veth peer name $VETH_CONT
+    ip link set dev $VETH_HOST up
+    ## attach veth in host netns to PHYS_BRIDGE
+    brctl addif $PHYS_BRIDGE_NAME $VETH_HOST
+
+    fn_attach_veth_to_container
+}
+
+
+function fn_display_link_status {
+    # if all goes well, we've linked the container to the bridge, update the counter
+    if [ $? -eq 0 ] ; then
+        # display status info
+        echo "Successfully linked container $CONTAINER_NAME to bridge $PHYS_BRIDGE_NAME"
+        echo -e "H_IX:  \t${H_IXd} (0x${H_IXx})"
+        echo -e "C_IX:  \t${C_IXd} (0x${C_IXx})"
+        echo -e "C_MAC: \t${CONTAINER_MAC}"
+        echo -e "C_IP4: \t${CONTAINER_IP}"
+        echo -e "C_veth:\t${CONTAINER_VETH_NAME} (${VETH_CONT})"
+        echo -e "H_veth:\t${VETH_HOST}"
+        echo
+    fi
+}
+
+# main:
+# lab constants
+# common/vars/main.yml
+MAC_PREFIX="{{ veth_mac_address_prefix }}"
+HOST_NETNS_ROOT="{{ host_netns_root }}"
+NETMASK_LEN="{{ management_subnet_netmask }}"
+
+# parse input arguments
+PHYS_HOST_NAME="${1}"
+CONTAINER_ID_NUMBER="${2}"
+# container name can be constructed from ID num: compute-<hostID>-<CONTAINER_ID_NUMBER>
+CONTAINER_TYPE=${3}
+
+# determine subnet from bridge
+
+# this is deterministic where each container gets an index which is used
+# + to create the IP address, MAC address, VETH numbering, etc
+H_IXd=$(fn_get_host_index $PHYS_HOST_NAME  )
+# host index (rack position), convert to 2 hex digits
+# H_IXx (hex representation of host id) can be passed as an input argument or used from the environment
+H_IXx=${H_IXx:-$(printf "%.2x" $H_IXd)}
+SUBNET_SEGMENT="${H_IXd}"
+
+
+# For last octet of IP address:
+# host=1, service=2, network=3, compute=11-200, floatingIP=201-254
+case "$CONTAINER_TYPE" in
+    service)
+        # echo "CONTAINER_TYPE = service"
+        CONTAINER_NAME=service-node
+        CONTAINER_ID_NUMBER=2
+        ;;
+    network)
+        # echo "CONTAINER_TYPE = network"
+        CONTAINER_NAME=network-node
+        CONTAINER_ID_NUMBER=3
+        ;;
+    measure)
+        # echo "CONTAINER_TYPE = measure"
+        CONTAINER_NAME=measure-node
+        CONTAINER_ID_NUMBER=4
+        ;;
+    compute)  # echo "CONTAINER_TYPE = compute"
+        CONTAINER_NAME="compute-${H_IXd}-${CONTAINER_ID_NUMBER}"
+        # echo "Compute node # = $CONTAINER_ID_NUMBER"
+        ;;
+    *)  echo "ERROR: Invalid CONTAINER_TYPE \"$CONTAINER_TYPE\" specified"
+        exit 1
+esac
+
+# description:
+# input: container type (string), "ID"  (int, 11-200) supplied on the command line
+#   this script will:
+# 1) link the container to both {{ management_bridge }} and {{ data_bridge }}
+# 2) modify their MAC addresses accordingly
+# 3) supply IP addresses
+
+fn_link_container_netns
+
+C_IXd=$CONTAINER_ID_NUMBER
+C_IXx=$(printf "%.2x" $C_IXd)
+
+# connect the adapter
+for ADAPTER_IX in {1..2}; do
+    A_IX="$(printf "%.2x" $ADAPTER_IX)"
+    case "$ADAPTER_IX" in
+        1)
+            # create links to the management bridge
+            NETMASK_LEN="{{ management_subnet_netmask }}"
+            PHYS_BRIDGE_NAME="{{ management_bridge }}"
+            SUBNET_BASE="{{ management_subnet_prefix }}"
+            CONTAINER_VETH_NAME="ethmgmt"
+            ;;
+        2)
+            # create links to the tenant/data bridge
+            NETMASK_LEN="{{ data_subnet_netmask }}"
+            PHYS_BRIDGE_NAME="{{ data_bridge }}"
+            SUBNET_BASE="{{ data_subnet_prefix }}"
+            CONTAINER_VETH_NAME="ethdata"
+            ;;
+        *)  echo "ERROR: Invalid ADAPTER_IX \"$ADAPTER_IX\" specified"
+            exit
+    esac
+    SUBNET_PREFIX="${SUBNET_BASE}.${SUBNET_SEGMENT}"
+    # container index (container id per host), convert to 2 hex digits
+    CONTAINER_IP="${SUBNET_PREFIX}.${C_IXd}/${NETMASK_LEN}"
+    CONTAINER_MAC="${MAC_PREFIX}:${H_IXx}:${C_IXx}:${A_IX}"
+
+    # make links
+    fn_create_and_link_veth
+    fn_display_link_status
+done
+
+# echo "You can remove the links created just now by simply removing the veth peer from the root netns with:"
+# echo "    ip link delete $VETH_HOST"
+
+unlink $HOST_NETNS_ROOT/$NETNS_NAME
+
+# vim: set ft=sh sw=4 ts=4 et ai :
+
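For reference, the rendered template roughly boils down to the following commands for one adapter of one container, assuming host index 5, COMP_ID 11, a management bridge named br_mgmt and the defaults in common/vars/main.yml (illustrative values only):

    mkdir -p /var/run/netns
    ln -s "$(docker inspect -f '{{.NetworkSettings.SandboxKey}}' compute-5-11)" /var/run/netns/netns-compute-5-11
    ip link add ve050b01h type veth peer name ve050b01c
    ip link set dev ve050b01h up
    brctl addif br_mgmt ve050b01h
    ip link set ve050b01c netns netns-compute-5-11
    ip netns exec netns-compute-5-11 ip link set dev ve050b01c name ethmgmt
    ip netns exec netns-compute-5-11 ip link set dev ethmgmt address fe:53:00:05:0b:01
    ip netns exec netns-compute-5-11 ip address add 10.129.5.11/16 dev ethmgmt
    ip netns exec netns-compute-5-11 ip link set dev ethmgmt up
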
index 00cd5c82f12846ac881d2ded661d6e2e917101a1..4d7b2d4c5252402c6c2110ab4156f2a5ace719bc 100644 (file)
@@ -1,7 +1,7 @@
 # common variables
 ---
 # network infrastructure
-## lab network resources
+## lab network resources - define in /etc/ansible/hosts
 # lab_http_proxy:
 # lab_https_proxy:
 # infrastructure_server:
@@ -9,7 +9,14 @@
 # docker_registry_port:
 # docker_registry:
 
+# OpenStack framework config
+stack_in_offline_mode: True
+auto_stack: True
+# run 'restart.sh' instead of 'start.sh' - mutually exclusive with auto_stack
+restart_containers: False
+use_odl_network: True
 
+# network variables
 ## network configuration of host machines
 mgmt_iface: eno3
 data_iface: eno4
@@ -19,19 +26,21 @@ test_netmask: 16
 management_interface: "{{ mgmt_iface }}"
 data_interface: "{{ data_iface }}"
 management_subnet_prefix: "10.129"
-mgmt_ip_prefix: "10.129"
 management_subnet_netmask: "16"
 data_subnet_prefix: "10.130"
-data_ip_prefix: "10.130"
 data_subnet_netmask: "16"
+# prefix for veth addresses
+veth_mac_address_prefix: "fe:53:00"
+host_netns_root: "/var/run/netns"
 
 # IP address of the Linux bridge on the physical host
-mgmt_ip: "{{ mgmt_ip_prefix }}.{{ rackpos }}.1/{{ test_netmask }}"
-data_ip: "{{ data_ip_prefix }}.{{ rackpos }}.1/{{ test_netmask }}"
+mgmt_ip: "{{ management_subnet_prefix }}.{{ rackpos }}.1/{{ management_subnet_netmask }}"
+data_ip: "{{ data_subnet_prefix }}.{{ rackpos }}.1/{{ data_subnet_netmask }}"
+# NOTE: WARNING!!! these names must be identical to those in service-node and compute-nodes' local.conf
+mgmt_veth_name: "ethmgmt"
+data_veth_name: "ethdata"
 
 # lab/default IP addresses of physical hosts
-mgmt_lab_ip_prefix: "10.11.26"
-data_lab_ip_prefix: "10.11.126"
 lab_netmask: 22
 # position and rackpos are an integer "index" of physical machines
 position: "{{ rackpos }}"
@@ -39,22 +48,9 @@ position: "{{ rackpos }}"
 mgmt_lab_ip: "{{ mgmt_lab_ip_prefix }}.{{ rackpos }}/{{ lab_netmask }}"
 data_lab_ip: "{{ data_lab_ip_prefix }}.{{ rackpos }}/{{ lab_netmask }}"
 
-# node operating system defaults
-# Variables for emulated framework containers
-## container configuration
-docker_systemd_version: "v0.1"
-compute_image: "s3p/compute"
-compute_version: "v0.5s"
-measure_image: "s3p/measure"
-measure_version: "v0.1.1"
-service_image: "s3p/service"
-service_version: "v0.4sc"
-control_node_image: "{{ docker_registry }}/{{ service_image }}:{{ service_version }}"
-
 ## cluster configuration
 ### the service_host ip should be conform to infrastructure routing
 #### service_host_phys_host is the host_index for the physical server
-service_host_phys_host: "20"
 service_host_container_index: "2"
 service_host_mgmt_ip: "{{ management_subnet_prefix }}.{{ service_host_phys_host }}.{{ service_host_container_index }}"
 control_node_container_name: "service-node"
@@ -62,5 +58,21 @@ control_node_container_name: "service-node"
 # network parameters for service and compute nodes
 lab_no_proxy: "localhost,10.0.0.0/8,192.168.0.0/16,172.17.0.0/16,127.0.0.1,127.0.0.0/8,{{ service_host_mgmt_ip }}"
 
-# OpenStack framework config
-use_odl_network: "True"
+
+# docker configuration variables
+## container configuration
+service_image: "s3p/service"
+compute_image: "s3p/compute"
+measure_image: "s3p/measure"
+docker_systemd_version: "v0.1"
+compute_version: "v0.4.1s"
+measure_version: "v0.1.1"
+service_version: "v0.4s"
+# service_version: "v0.4.1sb4"
+    # v0.4sb4
+    # v0.4.1sc
+control_node_image: "{{ docker_registry }}/{{ service_image }}:{{ service_version }}"
+compute_node_image: "{{ docker_registry }}/{{ compute_image }}:{{ compute_version }}"
+sleep_time: 10
+# vim: set et ai sw=2 ts=2 ft=yaml :
+
diff --git a/roles/compute/tasks/main.yml b/roles/compute/tasks/main.yml
new file mode 100644 (file)
index 0000000..6284973
--- /dev/null
@@ -0,0 +1,32 @@
+---
+- include: pull_compute.yml
+
+- include: run_compute.yml
+  # run_compute.yml registers compute_node_spawn_result
+
+- name: Display compute node container name
+  debug:
+    msg: "Compute node {{ container_name }} successfully spawned"
+    verbosity: 1
+  when: compute_node_spawn_result is defined
+
+- name: Start Stacking compute node
+  tags:
+    - compute
+    - openstack
+    - stack
+  include: ../../common/tasks/start_stacking.yml
+  when:
+  - auto_stack
+
+- name: REstart stacking in compute nodes
+  tags:
+    - compute
+    - openstack
+    - restart
+  include: ../../common/tasks/restart_stacking.yml
+  when:
+  - restart_containers
+
+# vim: set et sw=2 ts=2 :
+
diff --git a/roles/compute/tasks/pull_compute.yml b/roles/compute/tasks/pull_compute.yml
new file mode 100644 (file)
index 0000000..7ac6074
--- /dev/null
@@ -0,0 +1,13 @@
+---
+- name: Pull compute-node docker image from the registry
+  tags:
+    - pull
+    - docker
+    - containers
+  vars:
+    node_type: compute
+    container_tag: "{{ compute_version }}"
+  include: ../../common/tasks/pull_image.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
diff --git a/roles/compute/tasks/run_compute.yml b/roles/compute/tasks/run_compute.yml
new file mode 100644 (file)
index 0000000..1567852
--- /dev/null
@@ -0,0 +1,54 @@
+---
+- name: Print debug info
+  tags:
+    - compute
+    - debug
+  debug:
+    msg: "Spawning compute node '{{ container_name }}' with COMP_ID={{ COMP_ID }}"
+#    verbosity: 1
+
+- name: Spawn compute node
+  remote_user: root
+  become: yes
+  become_method: sudo
+  tags:
+    - spawn
+    - compute
+    - containers
+    - docker
+  docker_container:
+    name: "{{ container_name }}"
+    hostname: "{{ container_name }}"
+    image: "{{ compute_container_image }}"
+    state: started
+    privileged: yes
+    capabilities: ALL
+    volumes:
+      - /dev:/dev
+      - /lib/modules:/lib/modules
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+    published_ports:
+      - "{{ web_port_map }}"
+    stop_signal: SIGRTMIN+3
+    security_opts:
+      - "apparmor:docker-unconfined"
+    env:
+      TZ: "America/Los_Angeles"
+      ODL_NETWORK: "{{ use_odl_network }}"
+      SERVICE_HOST: "{{ service_host_mgmt_ip }}"
+      container: "docker"
+      http_proxy: "{{ lab_http_proxy }}"
+      https_proxy: "{{ lab_https_proxy }}"
+      no_proxy: "{{ lab_no_proxy }}"
+      TERM: screen
+  register: compute_node_spawn_result
+
+- name: Connect container to network with ansible (idempotent)
+  tags:
+    - network
+    - compute
+    - experimental
+  include_role:
+    name: network
+
+# vim: set et sw=2 ts=2 :
diff --git a/roles/compute/vars/main.yml b/roles/compute/vars/main.yml
new file mode 100644 (file)
index 0000000..3e2c1fb
--- /dev/null
@@ -0,0 +1,13 @@
+---
+# rackpos is a parameter of the physical hosts == position
+# COMP_ID must be passed as an 'extra-var'
+# like 'ansible-playbook ... --extra-vars "COMP_ID=11" '
+node_type: compute
+container_name: "compute-{{ rackpos }}-{{ COMP_ID }}"
+compute_container_image: "{{ docker_registry }}/{{ compute_image }}:{{ compute_version }}"
+web_port_map: "600{{ COMP_ID }}:80"
+cpuset_cpu1: "{{ COMP_ID }}"
+cpu2_offset: "{{ (ansible_processor_cores * ansible_processor_threads_per_core)|int }}"
+cpuset_cpu2: "{{ (COMP_ID)|int + (cpu2_offset)|int }}"
+cpuset: "{{ cpuset_cpu1 }},{{ cpuset_cpu2 }}"
+
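For orientation, with rackpos=5, COMP_ID=11 and a host whose facts report 22 cores with 2 threads per core (numbers are illustrative; the core count comes from Ansible facts on the target), these variables evaluate to:

    container_name -> compute-5-11
    web_port_map   -> 60011:80
    cpu2_offset    -> 22 * 2 = 44
    cpuset         -> 11,55
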
diff --git a/roles/examples/example_connect_container_to_network.yml b/roles/examples/example_connect_container_to_network.yml
new file mode 100644 (file)
index 0000000..da41a82
--- /dev/null
@@ -0,0 +1,5 @@
+---
+- hosts: cluster0
+  roles:
+    - common
+    - network
diff --git a/roles/examples/example_purge_containers.yml b/roles/examples/example_purge_containers.yml
new file mode 100644 (file)
index 0000000..64a5002
--- /dev/null
@@ -0,0 +1,10 @@
+---
+- hosts: cluster0
+  become: yes
+  vars:
+    # name_filter may be supplied as a variable or
+    # on the command line with:
+    # --extra-vars "name_filter=compute*"
+    name_filter: 'compute*'
+  tasks:
+    - include: ../infra/tasks/purge_containers.yml
diff --git a/roles/examples/example_setup_infra.yml b/roles/examples/example_setup_infra.yml
deleted file mode 100644 (file)
index d329e6e..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: compute-node-hosts
-  become: yes
-  roles:
-    - role: infra
diff --git a/roles/examples/example_setup_infrastructure.yml b/roles/examples/example_setup_infrastructure.yml
new file mode 100644 (file)
index 0000000..8fb5a39
--- /dev/null
@@ -0,0 +1,5 @@
+---
+- hosts: cluster0
+  roles:
+    - common
+    - infra
diff --git a/roles/examples/example_spawn_compute_node_11.yml b/roles/examples/example_spawn_compute_node_11.yml
new file mode 100644 (file)
index 0000000..00abbbe
--- /dev/null
@@ -0,0 +1,11 @@
+---
+- hosts: compute-node-hosts
+  become: yes
+  vars:
+    # COMP_ID must be supplied as a variable or
+    # on the command line with:
+    # --extra-vars "COMP_ID=(11-250)"
+    COMP_ID: 11
+  roles:
+    - common
+    - compute
diff --git a/roles/examples/example_spawn_compute_node_COMP_ID.yml b/roles/examples/example_spawn_compute_node_COMP_ID.yml
new file mode 100644 (file)
index 0000000..1542c56
--- /dev/null
@@ -0,0 +1,9 @@
+---
+- hosts: compute-node-hosts
+  become: yes
+    # COMP_ID must be supplied as a variable or
+    # on the command line with:
+    # --extra-vars "COMP_ID=(11-250)"
+  roles:
+    - common
+    - compute
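A possible way to drive this playbook, using the sample inventory and an arbitrary compute index (both illustrative):

    ansible-playbook -i roles/sample_hosts_file roles/examples/example_spawn_compute_node_COMP_ID.yml \
        --extra-vars "COMP_ID=12"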
diff --git a/roles/examples/example_start_service_node.yml b/roles/examples/example_start_service_node.yml
new file mode 100644 (file)
index 0000000..6ed30a9
--- /dev/null
@@ -0,0 +1,7 @@
+---
+- hosts: service-node-hosts
+  become: yes
+  roles:
+    - common
+    - service
+
index 88b3726325263ff0eab99e1c693c7a7348e9fe80..2bf98841572ab3b19463dd78252f9d472fc4d220 100644 (file)
@@ -1,5 +1,10 @@
 ---
-- include: parse_apparmor_profile.yml
+- name: Parse apparmor profile
+  tags:
+    - files
+    - docker
+  shell: apparmor_parser -r -W "{{ profile_path }}"
+  listen: parse apparmor profile
 
 - name: restart docker
   tags: docker
index e5b01f1731fff1f0cc8374c5b43d7cf7b78deff8..41928c01d8a07d5b6e667bc1d51ffbf5368cc4d4 100644 (file)
@@ -1,5 +1,15 @@
 ---
+- name: Show interface info
+  tags:
+    - infra
+    - network
+  debug:
+    msg: "mgmt_lab_ip {{ mgmt_lab_ip }}, mgmt_ip {{ mgmt_ip }}, interface {{ management_interface}} iface {{ mgmt_iface }}"
+
 - name: Setup mgmt bridge
+  tags:
+    - infra
+    - network
   vars:
     interface: "{{ management_interface }}"
     bridge: "{{ management_bridge }}"
@@ -9,6 +19,9 @@
   when: "'{{ management_bridge }}' not in ansible_interfaces"
 
 - name: Setup data bridge
+  tags:
+    - infra
+    - network
   vars:
     interface: "{{ data_interface }}"
     bridge: "{{ data_bridge }}"
index bb0e59b636bf0e04b74ee67de73841ae32408950..4370480f25c1f0f4a48299bbc5c7b5c043b9d318 100644 (file)
@@ -13,6 +13,7 @@
     - install
   action: apt pkg={{item}} state=installed
   with_items:
+       - bridge-utils
        - git
        - sshpass
        - openssh-server
index 240b437b2ba1b82509b9a606c8763bb94fe8a847..456db085ece1f27d330c1398a062d732f1306868 100644 (file)
@@ -1,7 +1,13 @@
 ---
+- include: install_packages.yml
 - include: copy_apparmor_profile.yml
+- name: Show interface info
+  tags:
+    - infra
+    - network
+  debug:
+    msg: "mgmt_lab_ip {{ mgmt_lab_ip }}, mgmt_ip {{ mgmt_ip }}, interface {{ management_interface}} iface {{ mgmt_iface }}"
 - include: create_bridges.yml
-- include: install_packages.yml
 - include: setup_docker_daemon.yml
 
 # vim: set et ts=2 sw=2 ai ft=yaml :
diff --git a/roles/infra/tasks/purge_containers.yml b/roles/infra/tasks/purge_containers.yml
new file mode 100644 (file)
index 0000000..71e3ce9
--- /dev/null
@@ -0,0 +1,25 @@
+---
+- name: Get list of running containers
+  tags:
+    - cleanup
+    - status
+  shell: "docker ps --quiet --all --filter \"name={{ name_filter }}\" "
+  register: container_list
+
+- name: Print list of containers matching name_filter
+  tags:
+    - cleanup
+    - status
+  debug:
+    msg: "Containers matching '{{ name_filter }}' = {{ container_list.stdout }}"
+  when: container_list.stdout != ""
+
+- name: Remove containers matching filter
+  tags:
+    - cleanup
+    - purge
+  docker_container:
+    name: "{{ item }}"
+    state: absent
+  with_items: "{{ container_list.stdout_lines }}"
+  when: container_list.stdout != ""
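A possible invocation of the purge via the example playbook above, with name_filter overridden on the command line (inventory path and filter are illustrative):

    ansible-playbook -i roles/sample_hosts_file roles/examples/example_purge_containers.yml \
        --extra-vars "name_filter=compute*"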
diff --git a/roles/network/tasks/add_veth_links_to_container.yml b/roles/network/tasks/add_veth_links_to_container.yml
new file mode 100644 (file)
index 0000000..5ec0458
--- /dev/null
@@ -0,0 +1,150 @@
+---
+# task to create a veth-link pair between a bridge and a container
+# See https://github.com/intel-odl/socets/blob/master/ansible/connect_container_to_networks.sh
+
+- name: Set host and container index info
+  tags:
+    - container
+    - network
+  set_fact:
+    host_ix_hex: "{{ '%02x'|format(HOST_ID|int) }}"
+    comp_ix_hex: "{{ '%02x'|format(COMP_ID|int) }}"
+
+- name: Set veth pair base name
+  tags:
+    - container
+    - network
+  set_fact:
+    veth_base: "ve{{ host_ix_hex }}{{ comp_ix_hex }}"
+    adapter_ix: "{% if bridge_name=='br_mgmt' %}01{% else %}02{% endif %}"
+
+- name: Set host and container veth names
+  tags:
+    - container
+    - network
+  set_fact:
+    veth_host: "{{ veth_base }}{{ adapter_ix }}h"
+    veth_cont: "{{ veth_base }}{{ adapter_ix }}c"
+    adapter_ix_hex: "{{ '%02x'|format(adapter_ix|int) }}"
+
+- name: Delete existing veth links
+  tags:
+    - container
+    - network
+    - cleanup
+  shell: "ip link delete {{ veth_host }} || exit 0"
+
+- name: Get veth link status info for host
+  tags:
+    - container
+    - network
+  command: ip -o link show type veth
+  register: ip_link_status
+
+- name: Create veth pair for {{ bridge_name }} bridge
+  tags:
+    - container
+    - network
+  command: "ip link add {{ veth_host }} type veth peer name {{ veth_cont }}"
+  when: veth_host not in ip_link_status.stdout
+
+# if the veth_cont link already exists, it should be in the container
+- name: Set veth_host up
+  tags:
+    - container
+    - network
+  command: "ip link set dev {{ veth_host }} up"
+  when: veth_host not in ip_link_status.stdout
+
+# assume that the bridge already exists, setup by infra role
+- name: Get bridge status for {{ bridge_name }}
+  tags:
+    - container
+    - network
+  command: "brctl show {{ bridge_name }}"
+  register: bridge_status
+
+- name: Bind veth link to {{ bridge_name }} bridge
+  tags:
+    - container
+    - network
+  command: "brctl addif {{ bridge_name }} {{ veth_host }}"
+  when: veth_host not in bridge_status.stdout
+
+- name: Get name of veth adapter IN-side container
+  tags:
+    - container
+    - network
+  set_fact:
+    container_veth_name: "{% if bridge_name=='br_mgmt' %}{{ mgmt_veth_name }}{% else %}{{ data_veth_name }}{% endif %}"
+    subnet_prefix: "{% if bridge_name=='br_mgmt' %}{{ management_subnet_prefix }}{% else %}{{ data_subnet_prefix }}{% endif %}"
+    netmask: "{% if bridge_name=='br_mgmt' %}{{ management_subnet_netmask }}{% else %}{{ data_subnet_netmask }}{% endif %}"
+
+- name: Determine container interface IP for {{ bridge_name }}
+  tags:
+    - container
+    - network
+  set_fact:
+    container_ip: "{{ subnet_prefix }}.{{ HOST_ID }}.{{ COMP_ID }}/{{ netmask }}"
+    container_mac: "{{ veth_mac_address_prefix }}:{{ host_ix_hex }}:{{ comp_ix_hex }}:{{ adapter_ix_hex }}"
+
+- name: Check for veth adapter presence in container (docker exec)
+  tags:
+    - container
+    - network
+  command: docker exec {{ container_name }} ip link show
+  register: container_link_status
+
+- name: Show container adapter info
+  tags:
+    - container
+    - network
+    - debug
+  debug:
+    msg: "Container name: {{ container_name }},
+      veth_host: {{ veth_host }},
+      veth_cont: {{ veth_cont }},
+      container_ip: {{ container_ip }},
+      container_mac: {{ container_mac }},
+      container_veth_name: {{ container_veth_name }},
+      container_netns_link: {{ netns_link }}
+      container_netns_name: {{ netns_name }}"
+    verbosity: 3
+
+- name: Attach veth adapter to container
+  tags:
+    - container
+    - network
+  command: ip link set {{ veth_cont }} netns {{ netns_name }}
+  when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter name
+  tags:
+    - container
+    - network
+  command: ip netns exec {{ netns_name }} ip link set dev {{ veth_cont }} name {{ container_veth_name }}
+  when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter L2 HW address
+  tags:
+    - container
+    - network
+  command: ip netns exec {{ netns_name }} ip link set dev {{ container_veth_name }} address {{ container_mac }}
+  when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter L3 IP address
+  tags:
+    - container
+    - network
+  command: ip netns exec {{ netns_name }} ip address add {{ container_ip }} dev {{ container_veth_name }}
+  when: container_veth_name not in container_link_status.stdout
+
+- name: Set container veth adapter 'up'
+  tags:
+    - container
+    - network
+  command: ip netns exec {{ netns_name }} ip link set dev {{ container_veth_name }} up
+  when: container_veth_name not in container_link_status.stdout
+
+# vim: set et ai ts=2 sw=2 sts=2 :
+
diff --git a/roles/network/tasks/link_container_netns.yml b/roles/network/tasks/link_container_netns.yml
new file mode 100644 (file)
index 0000000..7a25c50
--- /dev/null
@@ -0,0 +1,62 @@
+---
+- name: Get SandboxKey for container
+  tags:
+    - container
+    - network
+  # container_name must be passed on the command line,
+  # e.g. '--extra-vars "container_name=compute-5-11"'
+  command: "docker inspect --format {% raw %} '{{.NetworkSettings.SandboxKey}}' {% endraw %} {{ container_name }}"
+  register: sandbox_key
+
+- name: Print sandbox_key
+  tags:
+    - container
+    - network
+    - debug
+  debug:
+    msg: "Container '{{ container_name }}' has SandboxKey '{{ sandbox_key.stdout }}'"
+    verbosity: 2
+
+- name: Ensure host netns directory exists
+  tags:
+    - container
+    - network
+  file:
+    path: "{{ host_netns_root }}"
+    state: directory
+    mode: 0755
+
+- name: Set_fact netns_name
+  tags:
+    - container
+    - network
+  set_fact:
+    netns_name: "netns-{{ container_name }}"
+
+- name: Set_fact container_netns link name
+  tags:
+    - container
+    - network
+  set_fact:
+    netns_link: "{{ host_netns_root }}/{{ netns_name }}"
+
+- name: Remove netns link if it already exists
+  tags:
+    - container
+    - network
+    - cleanup
+  file:
+    path: "{{ netns_link }}"
+    state: absent
+    force: yes
+
+- name: Link container netns to host netns
+  tags:
+    - container
+    - network
+  file:
+    src: "{{ sandbox_key.stdout }}"
+    path: "{{ netns_link }}"
+    state: link
+    force: yes
+
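These tasks amount to the following manual steps, sketched here for an assumed container name (compute-5-11):

    SANDBOX_KEY=$(docker inspect --format '{{.NetworkSettings.SandboxKey}}' compute-5-11)
    mkdir -p /var/run/netns
    ln -sf "$SANDBOX_KEY" /var/run/netns/netns-compute-5-11
    ip netns exec netns-compute-5-11 ip link show
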
diff --git a/roles/network/tasks/link_to_bridge.yml b/roles/network/tasks/link_to_bridge.yml
new file mode 100644 (file)
index 0000000..f3a5b95
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# task to link a container to a bridge
+- include: link_container_netns.yml
+# netns_name and netns_link are set as facts inside link_container_netns.yml
+
+- include: add_veth_links_to_container.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
diff --git a/roles/network/tasks/main.yml b/roles/network/tasks/main.yml
new file mode 100644 (file)
index 0000000..585a172
--- /dev/null
@@ -0,0 +1,26 @@
+---
+# - name: Set_facts for linking containers to bridges
+#   set_fact:
+#     HOST_ID: "{{ rackpos }}"
+#     container_name: "compute-{{ HOST_ID }}-{{ COMP_ID }}"
+#     container_netns_name: "netns-{{ container_name }}"
+#     # third octet of ip address for containers & host bridge
+#     SUBNET_SEGMENT: "{{ rackpos }}"
+#     # H_IXd == integer host index (within rack/datacenter)
+#     H_IXd: "{{ rackpos }}"
+#     # H_IXx == H_IXd as a hex number for mac addresses
+#     # C_IX == Container index within host (e.g. 11)
+#     C_IX: "{{ COMP_ID }}"
+
+- name: "Link container to management bridge"
+  vars:
+    bridge_name: "{{ management_bridge }}"
+  include: link_to_bridge.yml
+
+- name: "Link container to data bridge"
+  vars:
+    bridge_name: "{{ data_bridge }}"
+  include: link_to_bridge.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
diff --git a/roles/network/vars/main.yml b/roles/network/vars/main.yml
new file mode 100644 (file)
index 0000000..8c4c25a
--- /dev/null
@@ -0,0 +1,13 @@
+---
+HOST_ID: "{{ rackpos }}"
+container_netns_name: "netns-{{ container_name }}"
+
+# third octet of ip address for containers & host bridge
+SUBNET_SEGMENT: "{{ rackpos }}"
+
+# H_IXd == integer host index (within rack/datacenter)
+H_IXd: "{{ rackpos }}"
+
+# H_IXx == H_IXd as a hex number for mac addresses
+# C_IX == Container index within host (e.g. 11)
+C_IX: "{{ COMP_ID }}"
diff --git a/roles/sample_hosts_file b/roles/sample_hosts_file
new file mode 100644 (file)
index 0000000..34c9129
--- /dev/null
@@ -0,0 +1,27 @@
+[all:vars]
+lab_http_proxy=""
+lab_https_proxy=""
+mgmt_lab_ip_prefix="10.20.30"
+data_lab_ip_prefix="10.20.31"
+infrastructure_server="odl-registry"
+registry_ip_address="10.20.32.1"
+docker_registry_port=4000
+docker_registry="{{ infrastructure_server }}:{{ docker_registry_port }}"
+service_host_phys_host="20"
+
+[cluster0:children]
+service-node-hosts
+compute-node-hosts
+
+[compute-node-hosts]
+pod1-05 ansible_host=10.20.30.5  rackpos=5
+pod1-06 ansible_host=10.20.30.6  rackpos=6
+pod1-08 ansible_host=10.20.30.8  rackpos=8
+pod1-09 ansible_host=10.20.30.9  rackpos=9
+pod1-10 ansible_host=10.20.30.10 rackpos=10
+
+[service-node-hosts]
+pod1-20 ansible_host=10.20.30.20 rackpos=20
+
+# vim: set et ai sw=2 ts=2 ft=dosini :
+
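The host names, addresses and proxies above are placeholders; after adapting them, the inventory can be sanity-checked with ad-hoc commands such as (illustrative):

    ansible -i roles/sample_hosts_file cluster0 -m ping
    ansible -i roles/sample_hosts_file compute-node-hosts -m setup -a "filter=ansible_processor*"
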
diff --git a/roles/service/tasks/main.yml b/roles/service/tasks/main.yml
new file mode 100644 (file)
index 0000000..78909f5
--- /dev/null
@@ -0,0 +1,23 @@
+---
+- include: pull_service.yml
+
+- include: run_service.yml
+
+- name: Display control node status
+  debug:
+    msg: "Control node successfully spawned with status: '{{ control_node_spawn_result }}'"
+    verbosity: 1
+
+- name: Start Stacking service node
+  tags:
+    - service
+    - openstack
+    - stack
+  include: ../../common/tasks/start_stacking.yml
+  when:
+  - control_node_spawn_result is defined
+  - "{{ auto_stack }}"
+  - False
+
+# vim: set et sw=2 ts=2 :
+
diff --git a/roles/service/tasks/pull_service.yml b/roles/service/tasks/pull_service.yml
new file mode 100644 (file)
index 0000000..45c356a
--- /dev/null
@@ -0,0 +1,14 @@
+---
+- name: Pull service-node docker image from the registry
+  tags:
+    - pull
+    - docker
+    - containers
+    - service
+  vars:
+    node_type: service
+    container_tag: "{{ service_version }}"
+  include: ../../common/tasks/pull_image.yml
+
+# vim: set et ts=2 sw=2 ai ft=yaml :
+
diff --git a/roles/service/tasks/run_service.yml b/roles/service/tasks/run_service.yml
new file mode 100644 (file)
index 0000000..c609dbb
--- /dev/null
@@ -0,0 +1,61 @@
+---
+- name: Print debug info
+  tags:
+    - service
+    - debug
+  debug:
+    msg: "Spawning service node '{{ container_name }}' with COMP_ID={{ COMP_ID }}"
+    verbosity: 2
+
+- name: Spawn Service/control node
+  remote_user: root
+  become: yes
+  become_method: sudo
+  tags:
+    - spawn
+    - service
+    - containers
+    - docker
+  docker_container:
+    name: "{{ control_node_container_name }}"
+    hostname: "{{ control_node_container_name }}"
+    image: "{{ control_node_image }}"
+    state: started
+    privileged: yes
+    capabilities: ALL
+    volumes:
+      - /dev:/dev
+      - /lib/modules:/lib/modules
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+    published_ports:
+      - 50080:80
+      - 58181:8181
+      - 56080:6080
+      - 58000:8000
+    stop_signal: SIGRTMIN+3
+    security_opts:
+      - "apparmor:docker-unconfined"
+    env:
+      TZ: "America/Los_Angeles"
+      JAVA_HOME: "/usr/lib/jvm/java-8-openjdk-amd64"
+      JAVA_MAX_MEM: "16g"
+      ODL_NETWORK: "{{ use_odl_network }}"
+      SERVICE_HOST: "{{ service_host_mgmt_ip }}"
+      container: "docker"
+      http_proxy: "{{ lab_http_proxy }}"
+      https_proxy: "{{ lab_https_proxy }}"
+      no_proxy: "{{ lab_no_proxy }}"
+      TERM: screen
+  register: control_node_spawn_result
+
+- name: Connect container to network with ansible (idempotent)
+  tags:
+    - network
+    - service
+    - experimental
+  include_role:
+    name: network
+  vars:
+    COMP_ID: 2
+
+# vim: set et sw=2 ts=2 :
diff --git a/roles/service/vars/main.yml b/roles/service/vars/main.yml
new file mode 100644 (file)
index 0000000..e6f96b7
--- /dev/null
@@ -0,0 +1,5 @@
+---
+# Variables for emulated framework containers
+container_name: "{{ control_node_container_name }}"
+node_type: service
+COMP_ID: 2
index 1d1251a9f9bd1f3ccdc1916b05242dbf96d3d623..1b8f3bfebdb4aa7093fa5d8077a61a3ee8ad148e 100644 (file)
@@ -1,6 +1,12 @@
 ---
 # file: site.yml
-- include: infrastructure.yml
+# setup framework infrastructure
+- include: examples/example_setup_infrastructure.yml
+# spawn a service node
+- include: examples/example_start_service_node.yml
+# spawn a compute node with COMP_ID=11
+- include: examples/example_spawn_compute_node_11.yml
+
 
 # vim: set et ai sw=2 ts=2 :
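
With the includes above, a single run sets up the infrastructure, spawns the service node and spawns one compute node (COMP_ID=11); assuming the sample inventory, that could look like:

    ansible-playbook -i roles/sample_hosts_file roles/site.yml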