Check Pod connectivity in COE 20/68920/65
authorR P Karthika <r.p.karthika@ericsson.com>
Thu, 1 Mar 2018 05:15:00 +0000 (10:45 +0530)
committerJamo Luhrsen <jluhrsen@redhat.com>
Mon, 11 Jun 2018 23:50:56 +0000 (23:50 +0000)
A master node and two minion nodes are brought up.
Kubernetes is installed on all three nodes.
Pods are brought up on the minion nodes. Two pods are
brought up on the same minion node, by which L2 connectivity
is checked, and one pod on each minion node is brought up,
by which L3 connectivity is checked.

Change-Id: Iae6f0325e02d1d93ab424ce0e488259bb789a1d0
Signed-off-by: Faseela K <faseela.k@ericsson.com>
Signed-off-by: Karthika Panneer <karthikapaneer97@gmail.com>
12 files changed:
csit/libraries/Coe.robot [new file with mode: 0644]
csit/libraries/SSHKeywords.robot
csit/scriptplans/coe-container-networking.txt [new file with mode: 0644]
csit/scripts/set_coe_host.sh [new file with mode: 0644]
csit/suites/coe/Pod_Connectivity.robot [new file with mode: 0644]
csit/testplans/coe-container-networking.txt [new file with mode: 0644]
csit/variables/coe/Modules.py [new file with mode: 0644]
csit/variables/coe/busy-box.yaml [new file with mode: 0644]
csit/variables/coe/coe.yaml [new file with mode: 0644]
csit/variables/coe/coe_play.yaml [new file with mode: 0644]
csit/variables/coe/hosts.yaml [new file with mode: 0644]
csit/variables/coe/odlovs-cni.conf.j2 [new file with mode: 0644]

diff --git a/csit/libraries/Coe.robot b/csit/libraries/Coe.robot
new file mode 100644 (file)
index 0000000..2f0e0b8
--- /dev/null
@@ -0,0 +1,184 @@
+*** Settings ***
+Library           BuiltIn
+Library           SSHLibrary
+Library           String
+Resource          DataModels.robot
+Resource          Genius.robot
+Resource          OVSDB.robot
+Resource          SSHKeywords.robot
+Resource          Utils.robot
+Resource          ../variables/netvirt/Variables.robot
+Resource          ../variables/Variables.robot
+Resource          VpnOperations.robot
+Variables         ../variables/coe/Modules.py
+Variables         ../variables/netvirt/Modules.py
+
+*** Variables ***
+${CNI_BINARY_FILE}    /opt/cni/bin/odlovs-cni
+${CONFIG_FILE}    /etc/cni/net.d/odlovs-cni.conf
+${CONFIG_FILE_TEMPLATE}    ${CURDIR}/../variables/coe/odlovs-cni.conf.j2
+${HOST_INVENTORY}    ${CURDIR}/../variables/coe/hosts.yaml
+${K8s_MASTER_IP}    ${TOOLS_SYSTEM_1_IP}
+${K8s_MINION1_IP}    ${TOOLS_SYSTEM_2_IP}
+${K8s_MINION2_IP}    ${TOOLS_SYSTEM_3_IP}
+${NODE_READY_STATUS}    \\sReady
+${PLAYBOOK}       ${CURDIR}/../variables/coe/coe_play.yaml
+${POD_RUNNING_STATUS}    \\sRunning
+${WATCHER_COE}    ${CURDIR}/../variables/coe/coe.yaml
+@{NODE_IPs}       ${K8s_MASTER_IP}    ${K8s_MINION1_IP}    ${K8s_MINION2_IP}
+
+*** Keywords ***
+Start Suite
+    [Documentation]    Suite setup keyword.
+    Coe.Configuration Playbook
+    Coe.Set Connection ids and Bridge
+    Coe.Verify Config Files
+    Coe.Verify Watcher Is Running
+    BuiltIn.Wait Until Keyword Succeeds    40s    2s    Coe.Check Node Status Is Ready
+    Coe.Label Nodes
+    Genius.Verify Tunnel Status as UP    default-transport-zone
+    Coe.Derive Coe Data Models
+
+Configuration Playbook
+    [Documentation]    Ansible playbook which does all basic configuration for kubernetes nodes.
+    ${hosts} =    OperatingSystem.Get File    ${HOST_INVENTORY}
+    ${hosts} =    String.Replace String    ${hosts}    master_ip    ${K8s_MASTER_IP}
+    ${hosts} =    String.Replace String    ${hosts}    minion1_ip    ${K8s_MINION1_IP}
+    ${hosts} =    String.Replace String    ${hosts}    minion2_ip    ${K8s_MINION2_IP}
+    ${hosts} =    String.Replace String    ${hosts}    odl_ip    ${ODL_SYSTEM_IP}
+    ${hosts} =    String.Replace String    ${hosts}    mport    ${OVSDBPORT}
+    ${hosts} =    String.Replace String    ${hosts}    cport    ${ODL_OF_PORT_6653}
+    ${hosts} =    String.Replace String    ${hosts}    filepath    ${CONFIG_FILE_TEMPLATE}
+    ${hosts} =    String.Replace String    ${hosts}    yamlpath    ${USER_HOME}/coe.yaml
+    OperatingSystem.Create File    ${USER_HOME}/hosts.yaml    ${hosts}
+    ${watcher} =    OperatingSystem.Get File    ${WATCHER_COE}
+    ${watcher} =    String.Replace String    ${watcher}    odlip    ${ODL_SYSTEM_IP}
+    ${watcher} =    String.Replace String    ${watcher}    port    ${RESTCONFPORT}
+    OperatingSystem.Create File    ${WATCHER_COE}    ${watcher}
+    SSHKeywords.Copy_File_To_Remote_System    ${K8s_MASTER_IP}    ${WATCHER_COE}    ${USER_HOME}
+    OperatingSystem.Move File    ${PLAYBOOK}    ${USER_HOME}
+    ${play_output} =    OperatingSystem.Run    ansible-playbook ${USER_HOME}/coe_play.yaml -i ${USER_HOME}/hosts.yaml
+    BuiltIn.Log    ${play_output}
+
+Set Connection ids and Bridge
+    [Documentation]    Sets the connection ids for all the nodes and gets the bridge from the configuration file.
+    ${conn_id_1} =    SSHLibrary.Open Connection    ${K8s_MASTER_IP}
+    SSHKeywords.Flexible_SSH_Login    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}
+    BuiltIn.Set Suite Variable    ${conn_id_1}
+    ${conn_id_2} =    SSHLibrary.Open Connection    ${K8s_MINION1_IP}
+    SSHKeywords.Flexible_SSH_Login    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}
+    BuiltIn.Set Suite Variable    ${conn_id_2}
+    ${conn_id_3} =    SSHLibrary.Open Connection    ${K8s_MINION2_IP}
+    SSHKeywords.Flexible_SSH_Login    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}
+    BuiltIn.Set Suite Variable    ${conn_id_3}
+    ${file} =    OperatingSystem.Get File    ${CONFIG_FILE_TEMPLATE}
+    ${line}    ${bridge} =    Should Match Regexp    ${file}    "ovsBridge": "(\\w.*)"
+    BuiltIn.Set Suite Variable    ${bridge}
+
+Verify Config Files
+    [Documentation]    Checks if the configuration files are present in all nodes
+    : FOR    ${nodes}    IN    @{NODE_IPs}
+    \    Utils.Verify File Exists On Remote System    ${nodes}    ${CONFIG_FILE}
+    : FOR    ${nodes}    IN    @{NODE_IPs}
+    \    Utils.Verify File Exists On Remote System    ${nodes}    ${CNI_BINARY_FILE}
+
+Verify Watcher Is Running
+    [Documentation]    Checks if watcher is running in the background
+    ${lines} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    ps -ef | grep watcher
+    BuiltIn.Should Match Regexp    ${lines}    .* watcher odl
+
+Check Node Status Is Ready
+    [Documentation]    Checks the status of nodes. This keyword is repeated until the status of all nodes is Ready.
+    ${nodes} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get nodes    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}    ${DEFAULT_LINUX_PROMPT_STRICT}
+    @{cluster} =    String.Split To Lines    ${nodes}    1
+    : FOR    ${node}    IN    @{cluster}
+    \    BuiltIn.Should Match Regexp    ${node}    ${NODE_READY_STATUS}
+
+Label Nodes
+    [Documentation]    Create labels for minions so that random allocation of pods to minions is avoided
+    ${nodes} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get nodes
+    ${node_1} =    String.Get Line    ${nodes}    2
+    ${minion_1} =    BuiltIn.Should Match Regexp    ${node_1}    ^\\w+-.*-\\d+
+    ${node_2} =    String.Get Line    ${nodes}    3
+    ${minion_2} =    BuiltIn.Should Match Regexp    ${node_2}    ^\\w+-.*-\\d+
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl label nodes ${minion_1} disktype=ssd
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl label nodes ${minion_2} disktype=ssl
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl get nodes --show-labels
+
+Derive Coe Data Models
+    [Documentation]    Data models are created by integrating the netvirt and coe data models, which are given as input to get the model dumps.
+    : FOR    ${models}    IN    @{netvirt_data_models}
+    \    Collections.Append To List    ${coe_data_models}    ${models}
+
+Check Pod Status Is Running
+    [Documentation]    Checks the status of pods. This keyword is repeated until the status of all pods is Running.
+    ${pods} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get pods -o wide    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}    ${DEFAULT_LINUX_PROMPT_STRICT}
+    @{cluster} =    String.Split To Lines    ${pods}    1
+    : FOR    ${pod}    IN    @{cluster}
+    \    BuiltIn.Should Match Regexp    ${pod}    ${POD_RUNNING_STATUS}
+
+Tear Down
+    [Documentation]    Test teardown to get dumpflows, ovsconfig, model dump, node status and pod status, to dump config files, and to delete pods.
+    OVSDB.Get DumpFlows And Ovsconfig    ${conn_id_1}    ${bridge}
+    OVSDB.Get DumpFlows And Ovsconfig    ${conn_id_2}    ${bridge}
+    OVSDB.Get DumpFlows And Ovsconfig    ${conn_id_3}    ${bridge}
+    BuiltIn.Run Keyword And Ignore Error    DataModels.Get Model Dump    ${ODL_SYSTEM_IP}    ${coe_data_models}
+    Coe.DumpConfig File
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl get nodes    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}    ${DEFAULT_LINUX_PROMPT_STRICT}
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl get pods -o wide    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}    ${DEFAULT_LINUX_PROMPT_STRICT}
+    Coe.Delete Pods
+
+Delete Pods
+    [Documentation]    Deletes all created pods and waits until the delete-status check succeeds, implying that all pods have been removed.
+    ${lines} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get pods -o wide
+    @{lines} =    String.Split To Lines    ${lines}    1
+    : FOR    ${status}    IN    @{lines}
+    \    ${pod_name} =    BuiltIn.Should Match Regexp    ${status}    ^\\w+
+    \    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl delete pods ${pod_name}
+    BuiltIn.Wait Until Keyword Succeeds    60s    3s    Coe.Check If Pods Are Terminated
+
+Check If Pods Are Terminated
+    [Documentation]    Checks if the pods created have been terminated. The keyword is repeated until the pods are deleted.
+    ${status} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get pods -o wide    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}    ${DEFAULT_LINUX_PROMPT_STRICT}
+    ...    ${DEFAULT_TIMEOUT}    return_stdout=False    return_stderr=True
+    BuiltIn.Should Contain    ${status}    No resources
+
+Dump Config File
+    [Documentation]    Logs the configuration files present in all nodes
+    : FOR    ${nodes}    IN    @{NODE_IPs}
+    \    Utils.Run Command On Remote System And Log    ${nodes}    cat ${CONFIG_FILE}
+
+Stop Suite
+    [Documentation]    Suite teardown keyword
+    Coe.Collect Watcher Log
+    Coe.Collect Journalctl Log
+    Coe.Stop_Watcher
+    Coe.Kube_reset
+    SSHLibrary.Close All Connections
+
+Collect Watcher Log
+    [Documentation]    Watcher running in background logs into watcher.out which is copied to ${JENKINS_WORKSPACE}/archives/watcher.log
+    SSHLibrary.Open Connection    ${K8s_MASTER_IP}
+    SSHKeywords.Flexible_SSH_Login    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}
+    SSHLibrary.Get File    /tmp/watcher.out    ${JENKINS_WORKSPACE}/archives/watcher.log
+    SSHLibrary.Close Connection
+
+Collect Journalctl Log
+    [Documentation]    Logs of the command journalctl -u kubelet is copied to ${JENKINS_WORKSPACE}/archives/journal.log
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    sudo journalctl -u kubelet > ${USER_HOME}/journal.txt
+    SSHLibrary.Open Connection    ${K8s_MASTER_IP}
+    SSHKeywords.Flexible_SSH_Login    ${DEFAULT_USER}    ${DEFAULT_PASSWORD}
+    SSHLibrary.Get File    ${USER_HOME}/journal.txt    ${JENKINS_WORKSPACE}/archives/journalctl.log
+    SSHLibrary.Close Connection
+
+Stop Watcher
+    [Documentation]    Kill the watcher running in the background after completion of the test cases.
+    ${lines} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    ps -ef | grep watcher
+    ${line}    ${pid} =    BuiltIn.Should Match Regexp    ${lines}    \\w+\\s+(\\d+).*watcher odl
+    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kill -9 ${pid}
+
+Kube reset
+    [Documentation]    Reset K8s to clear up all stale entries
+    : FOR    ${nodes}    IN    @{NODE_IPs}
+    \    ${kube} =    Utils.Run Command On Remote System And Log    ${nodes}    sudo kubeadm reset
+    \    BuiltIn.Should Contain    ${kube}    Stopping the kubelet service.
index 2b83a592f6a1723ee4eaab7a250827d9ce2f568f..c3f1078ed4016a2361acf5945dcbee3dcf3c8019 100644 (file)
@@ -251,3 +251,22 @@ Flexible_Controller_Login
     [Arguments]    ${user}=${ODL_SYSTEM_USER}    ${password}=${ODL_SYSTEM_PASSWORD}    ${delay}=0.5s
     [Documentation]    Call Flexible SSH Login, but with default values suitable for Controller machine.
     BuiltIn.Run Keyword And Return    Flexible SSH Login    user=${user}    password=${password}    delay=${delay}
     [Arguments]    ${user}=${ODL_SYSTEM_USER}    ${password}=${ODL_SYSTEM_PASSWORD}    ${delay}=0.5s
     [Documentation]    Call Flexible SSH Login, but with default values suitable for Controller machine.
     BuiltIn.Run Keyword And Return    Flexible SSH Login    user=${user}    password=${password}    delay=${delay}
+
+Move_File_To_Remote_System
+    [Arguments]    ${system}    ${source}    ${destination}=./    ${user}=${DEFAULT_USER}    ${password}=${DEFAULT_PASSWORD}    ${prompt}=${DEFAULT_LINUX_PROMPT}
+    ...    ${prompt_timeout}=5s
+    [Documentation]    Moves the ${source} file to the ${destination} file on the remote ${system}. Any pre-existing active
+    ...    ssh connection will be retained.
+    SSHKeywords.Run_Keyword_Preserve_Connection    Unsafe_Move_File_To_Remote_System    ${system}    ${source}    ${destination}    ${user}    ${password}
+    ...    ${prompt}    ${prompt_timeout}
+
+Unsafe_Move_File_To_Remote_System
+    [Arguments]    ${system}    ${source}    ${destination}=./    ${user}=${DEFAULT_USER}    ${password}=${DEFAULT_PASSWORD}    ${prompt}=${DEFAULT_LINUX_PROMPT}
+    ...    ${prompt_timeout}=5s
+    [Documentation]    Moves the ${source} file to the ${destination} file on the remote ${system}. The keyword opens and closes a single
+    ...    ssh connection and does not rely on any existing ssh connection that may be open.
+    SSHLibrary.Open_Connection    ${system}    prompt=${prompt}    timeout=${prompt_timeout}
+    Flexible_SSH_Login    ${user}    ${password}
+    SSHLibrary.Put File    ${source}    ${destination}
+    OperatingSystem.Remove File    ${source}
+    SSHLibrary.Close Connection
diff --git a/csit/scriptplans/coe-container-networking.txt b/csit/scriptplans/coe-container-networking.txt
new file mode 100644 (file)
index 0000000..6d6477d
--- /dev/null
@@ -0,0 +1,2 @@
+# Place the scripts in run order:
+integration/test/csit/scripts/set_coe_host.sh
diff --git a/csit/scripts/set_coe_host.sh b/csit/scripts/set_coe_host.sh
new file mode 100644 (file)
index 0000000..effa207
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+cat > ${WORKSPACE}/system2-ovs-restart.sh <<EOF
+
+sudo rm -rf /etc/openvswitch/conf.db
+sudo service openvswitch-switch restart
+
+EOF
+scp ${WORKSPACE}/system2-ovs-restart.sh ${TOOLS_SYSTEM_2_IP}:/tmp/
+ssh ${TOOLS_SYSTEM_2_IP} 'sudo bash /tmp/system2-ovs-restart.sh'
+scp ${WORKSPACE}/system2-ovs-restart.sh ${TOOLS_SYSTEM_3_IP}:/tmp/
+ssh ${TOOLS_SYSTEM_3_IP} 'sudo bash /tmp/system2-ovs-restart.sh'
diff --git a/csit/suites/coe/Pod_Connectivity.robot b/csit/suites/coe/Pod_Connectivity.robot
new file mode 100644 (file)
index 0000000..87a2b10
--- /dev/null
@@ -0,0 +1,58 @@
+*** Settings ***
+Suite Setup       Coe.Start Suite
+Suite Teardown    Coe.Stop Suite
+Test Teardown     Coe.Tear Down
+Library           BuiltIn
+Library           SSHLibrary
+Library           String
+Resource          ../../libraries/Coe.robot
+Resource          ../../libraries/DataModels.robot
+Resource          ../../libraries/SSHKeywords.robot
+Resource          ../../libraries/Utils.robot
+Resource          ../../variables/netvirt/Variables.robot
+Resource          ../../variables/Variables.robot
+
+*** Variables ***
+${BUSY_BOX}       ${CURDIR}/../../variables/coe/busy-box.yaml
+${VARIABLES_PATH}    ${CURDIR}/../../variables/coe
+@{BB_NAMES}       busybox1    busybox2    busybox3    busybox4
+@{BUSY_BOXES}     busy-box-1.yaml    busy-box-2.yaml    busy-box-3.yaml    busy-box-4.yaml
+
+*** Test Cases ***
+Verify L2 Connectivity Between Pods
+    [Documentation]    This testcase verifies the connectivity between pods brought up on the same node. Pods are brought up on the same node by using the same node selector in the busybox.yaml files.
+    Create Pods    ssd    ${BUSY_BOXES[0]}    ${BB_NAMES[0]}
+    Create Pods    ssd    ${BUSY_BOXES[1]}    ${BB_NAMES[1]}
+    BuiltIn.Wait Until Keyword Succeeds    55s    2s    Coe.Check Pod Status Is Running
+    Ping Pods
+
+Verify L3 Connectivity Between Pods
+    [Documentation]    This testcase verifies the connectivity between pods brought up on different nodes. Nodes are given different labels (eg : ssd, ssl) through the Coe.Label Nodes keyword.
+    ...    These labels are also included as node selectors in the busybox.yaml files, thus the pods are placed on the desired nodes, avoiding random allocation of pods.
+    ...    For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels.
+    Create Pods    ssd    ${BUSY_BOXES[2]}    ${BB_NAMES[2]}
+    Create Pods    ssl    ${BUSY_BOXES[3]}    ${BB_NAMES[3]}
+    BuiltIn.Wait Until Keyword Succeeds    55s    2s    Coe.Check Pod Status Is Running
+    Ping Pods
+
+*** Keywords ***
+Create Pods
+    [Arguments]    ${label}    ${yaml}    ${name}
+    [Documentation]    Creates pods using the labels of the nodes and busy box names passed as arguments.
+    ${busybox} =    OperatingSystem.Get File    ${BUSY_BOX}
+    ${busybox} =    String.Replace String    ${busybox}    string    ${label}
+    ${busybox} =    String.Replace String    ${busybox}    busyboxname    ${name}
+    OperatingSystem.Create File    ${VARIABLES_PATH}/${yaml}    ${busybox}
+    SSHKeywords.Move_file_To_Remote_System    ${K8s_MASTER_IP}    ${VARIABLES_PATH}/${yaml}    ${USER_HOME}
+    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl create -f ${yaml}
+
+Ping Pods
+    [Documentation]    Ping pods to check connectivity between them
+    ${lines} =    Utils.Run Command On Remote System    ${K8s_MASTER_IP}    kubectl get pods -o wide
+    ${pod_name} =    String.Get Line    ${lines}    1
+    ${pod_name} =    Builtin.Should Match Regexp    ${pod_name}    ^\\w+
+    @{lines} =    String.Split To Lines    ${lines}    2
+    : FOR    ${status}    IN    @{lines}
+    \    ${pod_ip} =    Builtin.Should Match Regexp    ${status}    \\d+.\\d+.\\d+.\\d+
+    \    ${ping} =    Utils.Run Command On Remote System And Log    ${K8s_MASTER_IP}    kubectl exec -it ${pod_name} -- ping -c 3 ${pod_ip}
+    \    Builtin.Should Match Regexp    ${ping}    ${PING_REGEXP}
diff --git a/csit/testplans/coe-container-networking.txt b/csit/testplans/coe-container-networking.txt
new file mode 100644 (file)
index 0000000..f2927f8
--- /dev/null
@@ -0,0 +1,3 @@
+# Place the suites in run order:
+integration/test/csit/suites/coe/Pod_Connectivity.robot
+
diff --git a/csit/variables/coe/Modules.py b/csit/variables/coe/Modules.py
new file mode 100644 (file)
index 0000000..c37b1c4
--- /dev/null
@@ -0,0 +1,4 @@
+coe_data_models = [
+    'config/pod:coe',
+    'operational/pod-meta:podidentifier-info',
+]
diff --git a/csit/variables/coe/busy-box.yaml b/csit/variables/coe/busy-box.yaml
new file mode 100644 (file)
index 0000000..fe89c93
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busyboxname
+  labels:
+    app: busyboxname
+spec:
+  containers:
+  - image: busybox
+    command:
+      - sleep
+      - "3600"
+    imagePullPolicy: IfNotPresent
+    name: busybox
+  restartPolicy: Always
+  nodeSelector:
+     disktype: string
diff --git a/csit/variables/coe/coe.yaml b/csit/variables/coe/coe.yaml
new file mode 100644 (file)
index 0000000..ffe8289
--- /dev/null
@@ -0,0 +1,7 @@
+kube:
+    config: ~/.kube/config
+odl:
+    host: http://odlip:port
+    user: admin
+    password: admin
+
diff --git a/csit/variables/coe/coe_play.yaml b/csit/variables/coe/coe_play.yaml
new file mode 100644 (file)
index 0000000..32c9f09
--- /dev/null
@@ -0,0 +1,157 @@
+---
+
+- hosts: coe-master:coe-minion1:coe-minion2
+  vars:
+    gopath: "{{ ansible_env.HOME }}/go"
+    coe_path: "{{ gopath }}/src/git.opendaylight.org/gerrit/p/coe.git"
+  environment:
+    PATH: "{{ ansible_env.HOME }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin:/usr/local/go/bin:/usr/bin:/bin"
+    GOPATH: "{{ gopath }}"
+    GOROOT: /usr/local/go
+  tasks:
+    - name: Make CNI config directory
+      file:
+        path: /etc/cni/net.d
+        state: directory
+      become: true
+    - name: Generate cni config
+      template:
+        src: "{{ conf_path }}"
+        dest: /etc/cni/net.d/odlovs-cni.conf
+      become: true
+    - name: Create $HOME/go/bin
+      file:
+        path: "{{ ansible_env.HOME }}/go/bin"
+        state: directory
+    - name: Install glide
+      shell: curl https://glide.sh/get | sh
+      args:
+        creates: "{{ gopath }}/bin/glide"
+    - name: Fetch COE git repository
+      git:
+        repo: 'https://git.opendaylight.org/gerrit/p/coe.git'
+        dest: "{{ coe_path }}"
+    - name: Fetch odl-cni dependencies with glide
+      shell: glide install -v
+      args:
+        chdir: "{{ coe_path }}/odlCNIPlugin/odlovs-cni"
+        creates: "{{ coe_path }}/odlCNIPlugin/odlovs-cni/vendor"
+    - name: Build odlovs-cni
+      shell: go build
+      args:
+        chdir: "{{ coe_path }}/odlCNIPlugin/odlovs-cni"
+        creates: "{{ coe_path }}/odlCNIPlugin/odlovs-cni/odlovs-cni"
+    - name: Install odlovs-cni
+      copy:
+        src: "{{ coe_path }}/odlCNIPlugin/odlovs-cni/odlovs-cni"
+        dest: "/opt/cni/bin/odlovs-cni"
+        remote_src: true
+        mode: 0755
+      become: true
+
+- hosts: coe-master
+  vars:
+    gopath: "{{ ansible_env.HOME}}/go"
+    coe_path: "{{ gopath }}/src/git.opendaylight.org/gerrit/p/coe.git"
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/go/bin:/usr/local/go/bin:{{ gopath }}/bin:/usr/bin:/bin"
+    GOPATH: "{{ gopath }}"
+    GOROOT: /usr/local/go
+  tasks:
+    - name: Fetch watcher dependencies with glide
+      shell: glide install -v
+      args:
+        chdir: "{{ coe_path }}/watcher"
+        creates: "{{ coe_path }}/watcher/vendor"
+    - name: Build watcher
+      shell: go build
+      args:
+        chdir: "{{ coe_path }}/watcher"
+        creates: "{{ coe_path }}/watcher/watcher"
+    - name: Copy watcher to /usr/local/bin
+      copy:
+        src: "{{ coe_path }}/watcher/watcher"
+        dest: /usr/local/bin/watcher
+        mode: 0755
+        remote_src: true
+      become: true
+    - name: Copy coe.yaml to /etc
+      copy:
+        src: "{{ watcher_path }}"
+        dest: /etc
+        mode: 0755
+        remote_src: true
+      become: true
+
+- hosts: coe-master:coe-minion1:coe-minion2
+  tasks:
+    - name: Set OVS Manager
+      shell: ovs-vsctl set-manager tcp:{{ manager_ip }}:{{ manager_port }} && touch /tmp/ovs-set-manager
+      args:
+        creates: /tmp/ovs-set-manager
+      become: true
+    - name: Enable auto-tunnelling
+      shell: ovs-vsctl set O . other_config:local_ip={{ overlay_ip }} && touch /tmp/enable-auto-tunnel
+      args:
+        creates: /tmp/enable-auto-tunnel
+      become: true
+    - name: Enable docker in systemd
+      systemd:
+        name: docker
+        daemon_reload: true
+        enabled: true
+        state: started
+      become: true
+
+
+- hosts: coe-master
+  tasks:
+    - name: Disable swapfile
+      shell: swapoff -a
+      become: true
+    - name: Run kubeadm init
+      shell: kubeadm init --apiserver-advertise-address={{ k8s_advertise_address }} > /tmp/k8s-output
+      args:
+        creates: /tmp/k8s-output
+      become: true
+    - name: Create join script
+      shell: echo "#!/bin/sh" > /tmp/join-k8s.sh && cat /tmp/k8s-output | grep "kubeadm join" | sed -e 's/^[[:space:]]*//g' >> /tmp/join-k8s.sh
+      args:
+        creates: /tmp/join-k8s.sh
+    - name: Fetch join script
+      fetch:
+        src: /tmp/join-k8s.sh
+        dest: /tmp/join-k8s.sh
+        flat: true
+    - name: Make kube directory
+      file:
+        path: "{{ ansible_env.HOME }}/.kube"
+        state: directory
+    - name: Copy kubeconfig
+      copy:
+        src: /etc/kubernetes/admin.conf
+        dest: "{{ ansible_env.HOME }}/.kube/config"
+        remote_src: true
+      become: true
+
+- hosts: coe-master
+  tasks:
+    - name: Run watcher
+      shell: nohup watcher odl </dev/null >/tmp/watcher.out 2>&1 &
+      args:
+        creates: /tmp/watcher.out
+
+- hosts: coe-minion1:coe-minion2
+  tasks:
+    - name: Disable swapfile
+      shell: swapoff -a
+      become: true
+    - name: Copy join file
+      copy:
+        src: /tmp/join-k8s.sh
+        dest: /tmp/join-k8s.sh
+        mode: 0700
+      become: true
+    - name: Join cluster
+      shell: /tmp/join-k8s.sh
+      become: true
diff --git a/csit/variables/coe/hosts.yaml b/csit/variables/coe/hosts.yaml
new file mode 100644 (file)
index 0000000..3feb878
--- /dev/null
@@ -0,0 +1,49 @@
+coe-master:
+  hosts:
+    master_ip:
+  vars:
+    external_interface: ~
+    overlay_ip: master_ip
+    external_ip: ~
+    subnet: 10.11.1.0/24
+    gateway: 10.11.1.1
+    manager_ip: odl_ip
+    manager_port: mport
+    controller_ip: odl_ip
+    controller_port: cport
+    go_version: \1.10.2\
+    conf_path: filepath
+    k8s_advertise_address: master_ip
+    watcher_path: yamlpath
+
+coe-minion1:
+  hosts:
+    minion1_ip:
+  vars:
+    external_interface: enp0s9
+    overlay_ip: minion1_ip
+    external_ip: 192.168.50.12
+    subnet: 10.11.2.0/24
+    gateway: 10.11.2.1
+    manager_ip: odl_ip
+    manager_port: mport
+    controller_ip: odl_ip
+    controller_port: cport
+    go_version: \1.10.2\
+    conf_path: filepath
+
+coe-minion2:
+  hosts:
+    minion2_ip:
+  vars:
+    external_interface: enp0s9
+    overlay_ip: minion2_ip
+    external_ip: 192.168.50.13
+    subnet: 10.11.3.0/24
+    gateway: 10.11.3.1
+    manager_ip: odl_ip
+    manager_port: mport
+    controller_ip: odl_ip
+    controller_port: cport
+    go_version: \1.10.2\
+    conf_path: filepath
diff --git a/csit/variables/coe/odlovs-cni.conf.j2 b/csit/variables/coe/odlovs-cni.conf.j2
new file mode 100644 (file)
index 0000000..1a22dbf
--- /dev/null
@@ -0,0 +1,23 @@
+{
+    "cniVersion": "0.3.0",
+    "name": "odl-cni",
+    "type": "odlovs-cni",
+    "mgrPort": {{ manager_port }},
+    "mgrActive": true,
+    "manager": "{{ manager_ip }}",
+    "ovsBridge": "br-int",
+    "ctlrPort": {{ controller_port }},
+    "ctlrActive": true,
+    "controller": "{{ controller_ip }}",
+    "externalIntf": "{{ external_interface }}",
+    "externalIp": "{{ external_ip }}",
+    "ipam": {
+        "type": "host-local",
+        "subnet": "{{ subnet }}",
+        "routes": [{
+            "dst": "0.0.0.0/0"
+        }],
+        "gateway": "{{ gateway }}"
+    }
+}
+