Use os-std-topo option in hwvtep jobs
diff --git a/jjb/integration/integration-deploy-openstack-run-test.sh b/jjb/integration/integration-deploy-openstack-run-test.sh
index 62e95a552abff7b9ece428d209ea2988cab5fbe4..fbc577c2c7b6e8c4812bd628d5f16873bfa15029 100644
--- a/jjb/integration/integration-deploy-openstack-run-test.sh
+++ b/jjb/integration/integration-deploy-openstack-run-test.sh
@@ -11,6 +11,9 @@ SSH="ssh -t -t"
 ADMIN_PASSWORD="admin"
 OPENSTACK_MASTER_CLIENTS_VERSION="queens"
 
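+# odltools is OpenDaylight's troubleshooting CLI; "-V" just prints the installed version to confirm the install.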
+pip install odltools
+odltools -V
+
 # TODO: remove this work (running changes.py) if/when it's moved higher up to be visible at the Robot level
 printf "\nshowing recent changes that made it into the distribution used by this job:\n"
 $PYTHON -m pip install --upgrade urllib3
@@ -40,7 +43,6 @@ function trap_handler() {
     local lasterr="$2"
     echo "trap_handler: ${prog}: line ${lastline}: exit status of last command: ${lasterr}"
     echo "trap_handler: command: ${BASH_COMMAND}"
-    collect_logs
     exit 1
 } # trap_handler()
 
@@ -55,6 +57,9 @@ DISTROSTREAM: ${DISTROSTREAM}
 BUNDLE_URL: ${BUNDLE_URL}
 CONTROLLERFEATURES: ${CONTROLLERFEATURES}
 CONTROLLERDEBUGMAP: ${CONTROLLERDEBUGMAP}
+SCRIPTPLAN: ${SCRIPTPLAN}
+CONFIGPLAN: ${CONFIGPLAN}
+STREAMTESTPLAN: ${STREAMTESTPLAN}
 TESTPLAN: ${TESTPLAN}
 SUITES: ${SUITES}
 PATCHREFSPEC: ${PATCHREFSPEC}
@@ -78,6 +83,7 @@ ENABLE_OS_PLUGINS: ${ENABLE_OS_PLUGINS}
 DISABLE_OS_SERVICES: ${DISABLE_OS_SERVICES}
 TENANT_NETWORK_TYPE: ${TENANT_NETWORK_TYPE}
 SECURITY_GROUP_MODE: ${SECURITY_GROUP_MODE}
+ENABLE_ITM_DIRECT_TUNNELS: ${ENABLE_ITM_DIRECT_TUNNELS}
 PUBLIC_PHYSICAL_NETWORK: ${PUBLIC_PHYSICAL_NETWORK}
 ENABLE_NETWORKING_L2GW: ${ENABLE_NETWORKING_L2GW}
 CREATE_INITIAL_NETWORKS: ${CREATE_INITIAL_NETWORKS}
@@ -138,7 +144,7 @@ function install_openstack_clients_in_robot_vm() {
     done
 
     if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
-        #networking-l2gw is not officially available in any release yet. Gettting the latest stable version.
+        #networking-l2gw is not officially available in any release yet. Getting the latest stable version.
         $PYTHON -m pip install networking-l2gw==11.0.0
     fi
 }
@@ -155,17 +161,6 @@ function is_openstack_feature_enabled() {
     echo 0
 }
 
-function fix_libvirt_version_n_cpu_pike() {
-    local ip=$1
-    ${SSH} ${ip} "
-        cd /opt/stack;
-        git clone https://git.openstack.org/openstack/requirements;
-        cd requirements;
-        git checkout stable/pike;
-        sed -i s/libvirt-python===3.5.0/libvirt-python===4.2.0/ upper-constraints.txt
-   "
-}
-
 # Function to install the rdo release
 # This helps avoid installing the wrong version of packages, which causes
 # functionality failures
@@ -186,6 +181,44 @@ function install_rdo_release() {
     esac
 }
 
+# Sets up the shared directory on the control node and exports it over NFS
+function setup_live_migration_control() {
+    local control_ip=$1
+    printf "${control_ip}: Setting up directory share with NFS\n"
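+    # Export /vm_instances over NFS so the compute nodes can mount a shared instance directory for live migration.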
+    cat > ${WORKSPACE}/setup_live_migration_control.sh << EOF
+sudo mkdir --mode=777 /vm_instances
+sudo chown -R jenkins:jenkins /vm_instances
+sudo yum install -y nfs-utils
+printf "/vm_instances *(rw,no_root_squash)\n" | sudo tee -a /etc/exports
+sudo systemctl start rpcbind nfs-server
+sudo exportfs
+EOF
+    scp ${WORKSPACE}/setup_live_migration_control.sh ${control_ip}:/tmp/setup_live_migration_control.sh
+    ssh ${control_ip} "bash /tmp/setup_live_migration_control.sh"
+}
+
+# Mounts the control node's share on the compute node and configures libvirtd to listen for migrations
+function setup_live_migration_compute() {
+    local compute_ip=$1
+    local control_ip=$2
+    printf "${compute_ip}: Mounting shared directory from ${control_ip}\n"
+    printf "${compute_ip}: Configuring libvirt in listen mode\n"
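+    # libvirtd must accept unauthenticated TCP connections here for the qemu+tcp live-migration URI set in the compute node's nova.conf.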
+    cat >  ${WORKSPACE}/setup_live_migration_compute.sh << EOF
+sudo yum install -y libvirt libvirt-devel nfs-utils
+sudo crudini --verbose  --set --inplace /etc/libvirt/libvirtd.conf '' listen_tls 0
+sudo crudini --verbose  --set --inplace /etc/libvirt/libvirtd.conf '' listen_tcp 1
+sudo crudini --verbose  --set --inplace /etc/libvirt/libvirtd.conf '' auth_tcp '"none"'
+sudo crudini --verbose  --set --inplace /etc/sysconfig/libvirtd '' LIBVIRTD_ARGS '"--listen"'
+sudo mkdir --mode=777 -p /var/instances
+sudo chown -R jenkins:jenkins /var/instances
+sudo chmod o+x /var/instances
+sudo systemctl start rpcbind
+sudo mount -t nfs ${control_ip}:/vm_instances /var/instances
+sudo mount
+EOF
+    scp ${WORKSPACE}/setup_live_migration_compute.sh ${compute_ip}:/tmp/setup_live_migration_compute.sh
+    ssh ${compute_ip} "bash /tmp/setup_live_migration_compute.sh"
+}
 
 # Add enable_services and disable_services to the local.conf
 function add_os_services() {
@@ -224,9 +257,8 @@ function create_control_node_local_conf() {
     cat > ${local_conf_file_name} << EOF
 [[local|localrc]]
 LOGFILE=stack.sh.log
-USE_SCREEN=True
-SCREEN_LOGDIR=/opt/stack/data/log
 LOG_COLOR=False
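+# Run devstack services as systemd units instead of screen sessions; service logs go to the journal.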
+USE_SYSTEMD=True
 RECLONE=${RECLONE}
 # Increase the wait used by stack to poll for services
 SERVICE_TIMEOUT=120
@@ -336,6 +368,7 @@ EOF
 [[post-config|\$NEUTRON_CONF]]
 [DEFAULT]
 service_plugins = ${SERVICE_PLUGINS}
+log_dir = /opt/stack/logs
 
 [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
 [agent]
@@ -346,16 +379,33 @@ minimize_polling=True
 # MTU(1400) + VXLAN(50) + VLAN(4) = 1454 < MTU eth0/br-physnet1(1458)
 physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1400
 path_mtu = 1458
+EOF
+
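+    # With networking-l2gw enabled, also turn on ODL's DHCP service, presumably so hwvtep-attached ports get DHCP from the controller rather than the neutron DHCP agent.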
+    if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then
+        cat >> ${local_conf_file_name} << EOF
+
+[ml2_odl]
+enable_dhcp_service = True
+EOF
+    fi
+
+    cat >> ${local_conf_file_name} << EOF
+
+[ml2_odl]
+# Trigger n-odl full sync every 30 secs.
+maintenance_interval = 30
 
 [[post-config|/etc/neutron/dhcp_agent.ini]]
 [DEFAULT]
 force_metadata = True
 enable_isolated_metadata = True
+log_dir = /opt/stack/logs
 
 [[post-config|/etc/nova/nova.conf]]
 [DEFAULT]
 force_config_drive = False
 force_raw_images = False
+log_dir = /opt/stack/logs
 
 [scheduler]
 discover_hosts_in_cells_interval = 30
@@ -376,8 +426,7 @@ function create_compute_node_local_conf() {
 [[local|localrc]]
 LOGFILE=stack.sh.log
 LOG_COLOR=False
-USE_SCREEN=True
-SCREEN_LOGDIR=/opt/stack/data/log
+USE_SYSTEMD=True
 RECLONE=${RECLONE}
 # Increase the wait used by stack to poll for the nova service on the control node
 NOVA_READY_TIMEOUT=1800
@@ -438,6 +487,10 @@ auth_strategy = keystone
 [DEFAULT]
 use_neutron = True
 force_raw_images = False
+log_dir = /opt/stack/logs
+[libvirt]
+live_migration_uri = qemu+tcp://%s/system
+virt_type = qemu
 EOF
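+    # Note: nova substitutes the migration target host for %s in live_migration_uri; qemu+tcp matches the auth-less TCP listener enabled by setup_live_migration_compute. virt_type is set to qemu, presumably because nested KVM is not available on the lab VMs.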
 
     echo "Compute local.conf created:"
@@ -625,25 +678,8 @@ function retry() {
     return ${rc}
 }
 
-# if we are using the new netvirt impl, as determined by the feature name
-# odl-netvirt-openstack (note: old impl is odl-ovsdb-openstack) then we
-# want PROVIDER_MAPPINGS to be used -- this should be fixed if we want to support
-# external networks in legacy netvirt
-if [[ ${CONTROLLERFEATURES} == *"odl-netvirt-openstack"* ]]; then
-  ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
-else
-  ODL_PROVIDER_MAPPINGS=
-fi
-
-# if we are using the old netvirt impl, as determined by the feature name
-# odl-ovsdb-openstack (note: new impl is odl-netvirt-openstack) then we
-# want ODL_L3 to be True.  New impl wants it False
-if [[ ${CONTROLLERFEATURES} == *"odl-ovsdb-openstack"* ]]; then
-    ODL_L3=True
-else
-    ODL_L3=False
-fi
-
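+# Only the new netvirt implementation (odl-netvirt-openstack) is supported now, so provider mappings are always set and ODL_L3 stays False.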
+ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}"
+ODL_L3=False
 RECLONE=False
 ODL_PORT=8181
 
@@ -765,8 +801,10 @@ for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
     scp ${WORKSPACE}/get_devstack.sh ${!CONTROLIP}:/tmp
     # devstack master has not yet fully migrated to lib/neutron; there are some ugly hacks that are
     # affecting the stacking.
-    #Workaround For Queens, Make the physical Network as physnet1 in lib/neutron
-    #Workaround Comment out creating initial Networks in lib/neutron
+    # Workaround for Queens: make the physical network physnet1 in lib/neutron.
+    # In Queens the new neutron libs are used and they no longer have the following options from Pike and earlier:
+    # Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS, which could be used for flat_networks,
+    # and Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS, which could be used for ml2_type_vlan
     ${SSH} ${!CONTROLIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
     if [ "${ODL_ML2_BRANCH}" == "stable/queens" ]; then
        ssh ${!CONTROLIP} "sed -i 's/flat_networks public/flat_networks public,physnet1/' /opt/stack/devstack/lib/neutron"
@@ -776,6 +814,7 @@ for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do
     scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf
     echo "Install rdo release to avoid incompatible Package versions"
     install_rdo_release ${!CONTROLIP}
+    setup_live_migration_control ${!CONTROLIP}
     echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${CONTROLIP}"
     ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
     ssh ${!CONTROLIP} "ps -ef | grep stack.sh"
@@ -806,7 +845,6 @@ if [ ${NUM_OPENSTACK_COMPUTE_NODES} -gt 0 ]; then
       echo "rabbitmq is ready, starting ${NUM_OPENSTACK_COMPUTE_NODES} compute(s)"
     else
       echo "rabbitmq was not ready in ${WAIT_FOR_RABBITMQ_MINUTES}m"
-      collect_logs
       exit 1
     fi
 fi
@@ -823,16 +861,11 @@ for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do
     scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts
     scp ${WORKSPACE}/get_devstack.sh  ${!COMPUTEIP}:/tmp
     ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1"
-    if [ "${ODL_ML2_BRANCH}" == "stable/pike" ]; then
-        echo "Updating requirements for ${ODL_ML2_BRANCH}"
-        echo "Workaround for libvirt-python failing installation"
-        echo "Modify upper-constraints to use libvirt-python 4.2.0"
-        fix_libvirt_version_n_cpu_pike ${!COMPUTEIP}
-    fi
     create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP[$SITE_INDEX]} "${ODL_OVS_MGRS[$SITE_INDEX]}"
     scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf
     echo "Install rdo release to avoid incompatible Package versions"
     install_rdo_release ${!COMPUTEIP}
+    setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP}
     echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${COMPUTEIP}"
     ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &"
     ssh ${!COMPUTEIP} "ps -ef | grep stack.sh"
@@ -881,7 +914,6 @@ while [ ${in_progress} -eq 1 ]; do
             continue
         elif [ "$stacking_status" == "Stacking Failed" ]; then
             echo "node $index ${os_node_list[index]}: stacking has failed"
-            collect_logs
             exit 1
         elif [ "$stacking_status" == "Stacking Complete" ]; then
             echo "node $index ${os_node_list[index]}: stacking complete"
@@ -895,7 +927,6 @@ while [ ${in_progress} -eq 1 ]; do
     sleep 60
     if [ ${iteration} -eq 60 ]; then
         echo "stacking has failed - took longer than 60m"
-        collect_logs
         exit 1
     fi
 done
@@ -927,7 +958,6 @@ for i in `seq 1 ${NUM_OPENSTACK_SITES}`; do
     num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r")
     if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then
         echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors"
-        collect_logs
         exit 1
     fi
 
@@ -1112,6 +1142,7 @@ for suite in ${SUITES}; do
     -v CONTROLLERFEATURES:"${CONTROLLERFEATURES}" \
     -v CONTROLLER_USER:${USER} \
     -v DEVSTACK_DEPLOY_PATH:/opt/stack/devstack \
+    -v ENABLE_ITM_DIRECT_TUNNELS:${ENABLE_ITM_DIRECT_TUNNELS} \
     -v HA_PROXY_IP:${HA_PROXY_IP} \
     -v HA_PROXY_1_IP:${HA_PROXY_1_IP} \
     -v HA_PROXY_2_IP:${HA_PROXY_2_IP} \
@@ -1147,6 +1178,7 @@ for suite in ${SUITES}; do
     -v OS_COMPUTE_4_IP:${OPENSTACK_COMPUTE_NODE_4_IP} \
     -v OS_COMPUTE_5_IP:${OPENSTACK_COMPUTE_NODE_5_IP} \
     -v OS_COMPUTE_6_IP:${OPENSTACK_COMPUTE_NODE_6_IP} \
+    -v CMP_INSTANCES_SHARED_PATH:/var/instances \
     -v OS_USER:${USER} \
     -v PUBLIC_PHYSICAL_NETWORK:${PUBLIC_PHYSICAL_NETWORK} \
     -v SECURITY_GROUP_MODE:${SECURITY_GROUP_MODE} \
@@ -1165,7 +1197,6 @@ ssh ${ODL_SYSTEM_IP} "ls -altr /tmp/${BUNDLEFOLDER}/data/log/"
 ssh ${ODL_SYSTEM_IP} "du -hs /tmp/${BUNDLEFOLDER}/data/log/*"
 
 echo "Tests Executed"
-collect_logs
 
 true  # perhaps Jenkins is testing last exit code
 # vim: ts=4 sw=4 sts=4 et ft=sh :