X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=jjb%2Fintegration%2Fintegration-deploy-openstack-run-test.sh;h=4f211e022e43cd04836cd2933dcfdb289e9c7e88;hb=0a5463eef63be0fdd691f1a686eee756f25df42c;hp=accea4a0b921dcb62ee82694cd43fdebfe4c2820;hpb=7e9491bb72105fa920e96e92458f45d3ec740a6f;p=releng%2Fbuilder.git diff --git a/jjb/integration/integration-deploy-openstack-run-test.sh b/jjb/integration/integration-deploy-openstack-run-test.sh index accea4a0b..4f211e022 100644 --- a/jjb/integration/integration-deploy-openstack-run-test.sh +++ b/jjb/integration/integration-deploy-openstack-run-test.sh @@ -14,9 +14,8 @@ PYTHON="${ROBOT_VENV}/bin/python" SSH="ssh -t -t" ADMIN_PASSWORD="admin" OPENSTACK_MASTER_CLIENTS_VERSION="queens" - -pip install odltools -odltools -V +#Size of the partition to /opt/stack in control and compute nodes +TMPFS_SIZE=2G # TODO: remove this work to run changes.py if/when it's moved higher up to be visible at the Robot level printf "\nshowing recent changes that made it into the distribution used by this job:\n" @@ -45,7 +44,7 @@ function trap_handler() { local prog="$0" local lastline="$1" local lasterr="$2" - echo "trap_hanlder: ${prog}: line ${lastline}: exit status of last command: ${lasterr}" + echo "trap_handler: ${prog}: line ${lastline}: exit status of last command: ${lasterr}" echo "trap_handler: command: ${BASH_COMMAND}" exit 1 } # trap_handler() @@ -111,10 +110,6 @@ function install_openstack_clients_in_robot_vm() { function install_rdo_release() { local ip=$1 case ${OPENSTACK_BRANCH} in - *pike*) - ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-pike/rdo-release-pike-1.noarch.rpm" - ;; - *queens*) ${SSH} ${ip} "sudo yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-queens/rdo-release-queens-1.noarch.rpm" ;; @@ -318,7 +313,13 @@ minimize_polling=True physical_network_mtus = ${PUBLIC_PHYSICAL_NETWORK}:1400 path_mtu = 1458 EOF - + if [ "${ENABLE_GRE_TYPE_DRIVERS}" == "yes" ]; then + cat >> ${local_conf_file_name} << EOF +type_drivers = local,flat,vlan,gre,vxlan +[ml2_type_gre] +tunnel_id_ranges = 1:1000 +EOF + fi if [ "${ENABLE_NETWORKING_L2GW}" == "yes" ]; then cat >> ${local_conf_file_name} << EOF @@ -445,22 +446,26 @@ EOF } # create_compute_node_local_conf() function configure_haproxy_for_neutron_requests() { - MGRIP=$1 + local -r haproxy_ip=$1 # shellcheck disable=SC2206 - ODL_IPS=(${2//,/ }) + local -r odl_ips=(${2//,/ }) cat > ${WORKSPACE}/install_ha_proxy.sh<< EOF sudo systemctl stop firewalld sudo yum -y install policycoreutils-python haproxy EOF -cat > ${WORKSPACE}/haproxy.cfg << EOF + cat > ${WORKSPACE}/haproxy.cfg << EOF global daemon group haproxy log /dev/log local0 maxconn 20480 pidfile /tmp/haproxy.pid + ssl-default-bind-ciphers !SSLv2:kEECDH:kRSA:kEDH:kPSK:+3DES:!aNULL:!eNULL:!MD5:!EXP:!RC4:!SEED:!IDEA:!DES + ssl-default-bind-options no-sslv3 no-tlsv10 + stats socket /var/lib/haproxy/stats mode 600 level user + stats timeout 2m user haproxy defaults @@ -469,35 +474,44 @@ defaults mode tcp retries 3 timeout http-request 10s - timeout queue 1m + timeout queue 2m timeout connect 10s - timeout client 1m - timeout server 1m + timeout client 2m + timeout server 2m timeout check 10s listen opendaylight - bind ${MGRIP}:8080 - balance source + bind ${haproxy_ip}:8181 transparent + mode http + http-request set-header X-Forwarded-Proto https if { ssl_fc } + http-request set-header X-Forwarded-Proto http if !{ ssl_fc } + option httpchk GET /diagstatus + option httplog +EOF 
-listen opendaylight_rest - bind ${MGRIP}:8181 - balance source + odlindex=1 + for odlip in ${odl_ips[*]}; do + echo " server opendaylight-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2" >> ${WORKSPACE}/haproxy.cfg + odlindex=$((odlindex+1)) + done -listen opendaylight_websocket - bind ${MGRIP}:8185 - balance source + cat >> ${WORKSPACE}/haproxy.cfg << EOF +listen opendaylight_ws + bind ${haproxy_ip}:8185 transparent + mode http + timeout connect 5s + timeout client 25s + timeout server 25s + timeout tunnel 3600s EOF odlindex=1 - for odlip in ${ODL_IPS[*]}; do - sed -i "/listen opendaylight$/a server controller-${odlindex} ${odlip}:8080 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg - sed -i "/listen opendaylight_rest$/a server controller-rest-${odlindex} ${odlip}:8181 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg - sed -i "/listen opendaylight_websocket$/a server controller-websocket-${odlindex} ${odlip}:8185 check fall 5 inter 2000 rise 2" ${WORKSPACE}/haproxy.cfg + for odlip in ${odl_ips[*]}; do + echo " server opendaylight-ws-${odlindex} ${odlip}:8185 check fall 5 inter 2000 rise 2" >> ${WORKSPACE}/haproxy.cfg odlindex=$((odlindex+1)) done - echo "Dump haproxy.cfg" cat ${WORKSPACE}/haproxy.cfg @@ -512,11 +526,11 @@ sudo systemctl status haproxy true EOF - scp ${WORKSPACE}/install_ha_proxy.sh ${MGRIP}:/tmp - ${SSH} ${MGRIP} "sudo bash /tmp/install_ha_proxy.sh" - scp ${WORKSPACE}/haproxy.cfg ${MGRIP}:/tmp - scp ${WORKSPACE}/deploy_ha_proxy.sh ${MGRIP}:/tmp - ${SSH} ${MGRIP} "sudo bash /tmp/deploy_ha_proxy.sh" + scp ${WORKSPACE}/install_ha_proxy.sh ${haproxy_ip}:/tmp + ${SSH} ${haproxy_ip} "sudo bash /tmp/install_ha_proxy.sh" + scp ${WORKSPACE}/haproxy.cfg ${haproxy_ip}:/tmp + scp ${WORKSPACE}/deploy_ha_proxy.sh ${haproxy_ip}:/tmp + ${SSH} ${haproxy_ip} "sudo bash /tmp/deploy_ha_proxy.sh" } # configure_haproxy_for_neutron_requests() # Following three functions are debugging helpers when debugging devstack changes. @@ -625,6 +639,26 @@ function retry() { return ${rc} } +function install_ovs() { + local -r node=${1} + local -r rpm_path=${2} + + if [ "${OVS_INSTALL:0:1}" = "v" ]; then + # An OVS version was given, so we build it ourselves from OVS git repo. + # Only on the first node though, consecutive nodes will use RPMs + # built for the first one. + [ ! -d "${rpm_path}" ] && mkdir -p "${rpm_path}" && build_ovs ${node} ${OVS_INSTALL} "${rpm_path}" + # Install OVS from path + install_ovs_from_path ${node} "${rpm_path}" + elif [ "${OVS_INSTALL:0:4}" = "http" ]; then + # Otherwise, install from rpm repo directly. 
+ install_ovs_from_repo ${node} ${OVS_INSTALL} + else + echo "Expected either an OVS version git tag or a repo http url" + exit 1 + fi +} + ODL_PROVIDER_MAPPINGS="\${PUBLIC_PHYSICAL_NETWORK}:${PUBLIC_BRIDGE}" RECLONE=False ODL_PORT=8181 @@ -666,6 +700,10 @@ sudo iptables --line-numbers -nvL true EOF +#For SFC Tests a larger partition is required for creating instances with Ubuntu +if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then + TMPFS_SIZE=12G +fi cat > ${WORKSPACE}/get_devstack.sh << EOF sudo systemctl stop firewalld sudo yum install bridge-utils python-pip -y @@ -680,7 +718,7 @@ echo "::1 localhost \${HOSTNAME}" >> /tmp/hosts sudo mv /tmp/hosts /etc/hosts sudo mkdir /opt/stack echo "Create RAM disk for /opt/stack" -sudo mount -t tmpfs -o size=2G tmpfs /opt/stack +sudo mount -t tmpfs -o size=${TMPFS_SIZE} tmpfs /opt/stack sudo chmod 777 /opt/stack cd /opt/stack echo "git clone https://git.openstack.org/openstack-dev/devstack --branch ${OPENSTACK_BRANCH}" @@ -690,16 +728,14 @@ if [ -n "${DEVSTACK_HASH}" ]; then echo "git checkout ${DEVSTACK_HASH}" git checkout ${DEVSTACK_HASH} fi +wget https://raw.githubusercontent.com/shague/odl_tools/master/fix-logging.patch.txt -O /tmp/fix-logging.patch.txt +patch --verbose -p1 -i /tmp/fix-logging.patch.txt git --no-pager log --pretty=format:'%h %<(13)%ar%<(13)%cr %<(20,trunc)%an%d %s%b' -n20 echo echo "workaround: do not upgrade openvswitch" sudo yum install -y yum-plugin-versionlock sudo yum versionlock add openvswitch - -#Install qemu-img command in Control Node for Pike -echo "Install qemu-img application" -sudo yum install -y qemu-img EOF cat > "${WORKSPACE}/setup_host_cell_mapping.sh" << EOF @@ -709,31 +745,32 @@ sudo nova-manage db sync sudo nova-manage cell_v2 discover_hosts EOF +cat > "${WORKSPACE}/workaround_networking_sfc.sh" << EOF +cd /opt/stack +git clone https://git.openstack.org/openstack/networking-sfc +cd networking-sfc +git checkout ${OPENSTACK_BRANCH} +git checkout master -- devstack/plugin.sh +EOF + NUM_OPENSTACK_SITES=${NUM_OPENSTACK_SITES:-1} compute_index=1 -odl_index=1 os_node_list=() -os_interval=$(( ${NUM_OPENSTACK_SYSTEM} / ${NUM_OPENSTACK_SITES} )) -ha_proxy_index=${os_interval} - -for i in `seq 1 ${NUM_OPENSTACK_SITES}`; do - if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then - echo "Configure HAProxy" - ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_${i}_IP - ha_proxy_index=$(( $ha_proxy_index + $os_interval )) - odl_index=$(((i - 1) * 3 + 1)) - ODL_IP_PARAM1=ODL_SYSTEM_$((odl_index++))_IP - ODL_IP_PARAM2=ODL_SYSTEM_$((odl_index++))_IP - ODL_IP_PARAM3=ODL_SYSTEM_$((odl_index++))_IP - ODLMGRIP[$i]=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses HAProxy VIP - ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs - configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS[$i]}" - else - ODL_IP_PARAM=ODL_SYSTEM_${i}_IP - ODL_OVS_MGRS[$i]="${!ODL_IP_PARAM}" # ODL Northbound uses ODL IP - ODLMGRIP[$i]=${!ODL_IP_PARAM} # OVSDB connects to ODL IP - fi -done + +if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then + echo "Configure HAProxy" + ODL_HAPROXYIP_PARAM=OPENSTACK_HAPROXY_1_IP + ODL_IP_PARAM1=ODL_SYSTEM_1_IP + ODL_IP_PARAM2=ODL_SYSTEM_2_IP + ODL_IP_PARAM3=ODL_SYSTEM_3_IP + ODLMGRIP=${!ODL_HAPROXYIP_PARAM} # ODL Northbound uses HAProxy VIP + ODL_OVS_MGRS="${!ODL_IP_PARAM1},${!ODL_IP_PARAM2},${!ODL_IP_PARAM3}" # OVSDB connects to all ODL IPs + configure_haproxy_for_neutron_requests ${!ODL_HAPROXYIP_PARAM} "${ODL_OVS_MGRS}" +else + 
ODL_IP_PARAM=ODL_SYSTEM_1_IP + ODLMGRIP=${!ODL_IP_PARAM} # OVSDB connects to ODL IP + ODL_OVS_MGRS="${!ODL_IP_PARAM}" # ODL Northbound uses ODL IP +fi os_ip_list=() for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do @@ -753,6 +790,7 @@ for i in "${!os_ip_list[@]}"; do tcpdump_start "${i}" "${ip}" "port 6653" done + # Begin stacking the nodes, starting with the controller(s) and then the compute(s) for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do @@ -773,8 +811,15 @@ for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do if [ "${ODL_ML2_BRANCH}" == "stable/queens" ]; then ssh ${!CONTROLIP} "sed -i 's/flat_networks public/flat_networks public,physnet1/' /opt/stack/devstack/lib/neutron" ssh ${!CONTROLIP} "sed -i '186i iniset \$NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges public:1:4094,physnet1:1:4094' /opt/stack/devstack/lib/neutron" + #Workaround for networking-sfc to configure the paramaters in neutron.conf if the + # services used are neutron-api, neutron-dhcp etc instead of q-agt. + # Can be removed if the patch https://review.openstack.org/#/c/596287/ gets merged + if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then + scp ${WORKSPACE}/workaround_networking_sfc.sh ${!CONTROLIP}:/tmp/ + ssh ${!CONTROLIP} "bash -x /tmp/workaround_networking_sfc.sh" + fi fi - create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP[$i]} "${ODL_OVS_MGRS[$i]}" + create_control_node_local_conf ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}" scp ${WORKSPACE}/local.conf_control_${!CONTROLIP} ${!CONTROLIP}:/opt/stack/devstack/local.conf echo "Install rdo release to avoid incompatible Package versions" install_rdo_release ${!CONTROLIP} @@ -782,6 +827,12 @@ for i in `seq 1 ${NUM_OPENSTACK_CONTROL_NODES}`; do if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then setup_live_migration_compute ${!CONTROLIP} ${!CONTROLIP} fi + [ -n "${OVS_INSTALL}" ] && install_ovs ${!CONTROLIP} /tmp/ovs_rpms + if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then + # This should be really done by networking-odl devstack plugin, + # but in the meantime do it ourselves + ssh ${!CONTROLIP} "sudo ovs-vsctl set Open_vSwitch . 
external_ids:of-tunnel=true" + fi echo "Stack the control node ${i} of ${NUM_OPENSTACK_CONTROL_NODES}: ${CONTROLIP}" ssh ${!CONTROLIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &" ssh ${!CONTROLIP} "ps -ef | grep stack.sh" @@ -817,10 +868,8 @@ if [ ${NUM_OPENSTACK_COMPUTE_NODES} -gt 0 ]; then fi for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do - NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES)) - SITE_INDEX=$((((i - 1) / NUM_COMPUTES_PER_SITE) + 1)) # We need the site index to infer the control node IP for this compute COMPUTEIP=OPENSTACK_COMPUTE_NODE_${i}_IP - CONTROLIP=OPENSTACK_CONTROL_NODE_${SITE_INDEX}_IP + CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP echo "Configure the stack of the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}" scp ${WORKSPACE}/disable_firewall.sh "${!COMPUTEIP}:/tmp" ${SSH} "${!COMPUTEIP}" "sudo bash /tmp/disable_firewall.sh" @@ -828,11 +877,17 @@ for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do scp ${WORKSPACE}/hosts_file ${!COMPUTEIP}:/tmp/hosts scp ${WORKSPACE}/get_devstack.sh ${!COMPUTEIP}:/tmp ${SSH} ${!COMPUTEIP} "bash /tmp/get_devstack.sh > /tmp/get_devstack.sh.txt 2>&1" - create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP[$SITE_INDEX]} "${ODL_OVS_MGRS[$SITE_INDEX]}" + create_compute_node_local_conf ${!COMPUTEIP} ${!CONTROLIP} ${ODLMGRIP} "${ODL_OVS_MGRS}" scp ${WORKSPACE}/local.conf_compute_${!COMPUTEIP} ${!COMPUTEIP}:/opt/stack/devstack/local.conf echo "Install rdo release to avoid incompatible Package versions" install_rdo_release ${!COMPUTEIP} setup_live_migration_compute ${!COMPUTEIP} ${!CONTROLIP} + [ -n "${OVS_INSTALL}" ] && install_ovs ${!COMPUTEIP} /tmp/ovs_rpms + if [[ "${ENABLE_OS_PLUGINS}" =~ networking-sfc ]]; then + # This should be really done by networking-odl devstack plugin, + # but in the meantime do it ourselves + ssh ${!COMPUTEIP} "sudo ovs-vsctl set Open_vSwitch . external_ids:of-tunnel=true" + fi echo "Stack the compute node ${i} of ${NUM_OPENSTACK_COMPUTE_NODES}: ${!COMPUTEIP}" ssh ${!COMPUTEIP} "cd /opt/stack/devstack; nohup ./stack.sh > /opt/stack/devstack/nohup.out 2>&1 &" ssh ${!COMPUTEIP} "ps -ef | grep stack.sh" @@ -899,122 +954,111 @@ while [ ${in_progress} -eq 1 ]; do done # Further configuration now that stacking is complete. -NUM_COMPUTES_PER_SITE=$((NUM_OPENSTACK_COMPUTE_NODES / NUM_OPENSTACK_SITES)) -for i in `seq 1 ${NUM_OPENSTACK_SITES}`; do - echo "Configure the Control Node" - CONTROLIP=OPENSTACK_CONTROL_NODE_${i}_IP - # Gather Compute IPs for the site - for j in `seq 1 ${NUM_COMPUTES_PER_SITE}`; do - COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j)) - IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP - COMPUTE_IPS[$((j-1))]=${!IP_VAR} - done +echo "Configure the Control Node" +CONTROLIP=OPENSTACK_CONTROL_NODE_1_IP +# Gather Compute IPs for the site +for i in `seq 1 ${NUM_OPENSTACK_COMPUTE_NODES}`; do + IP_VAR=OPENSTACK_COMPUTE_NODE_${i}_IP + COMPUTE_IPS[$((i-1))]=${!IP_VAR} +done - echo "sleep for 60s and print hypervisor-list" - sleep 60 - ${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list" - # in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors - # will be the same as the number of openstack systems. 
However, if we are doing multinode openstack then the - # assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors - # is one less than the total number of openstack systems - if [ $((NUM_OPENSTACK_SYSTEM / NUM_OPENSTACK_SITES)) -eq 1 ]; then - expected_num_hypervisors=1 - else - expected_num_hypervisors=${NUM_COMPUTES_PER_SITE} - if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then - expected_num_hypervisors=$((expected_num_hypervisors + 1)) - fi - fi - num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r") - if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then - echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors" - exit 1 +echo "sleep for 60s and print hypervisor-list" +sleep 60 +${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; nova hypervisor-list" +# in the case that we are doing openstack (control + compute) all in one node, then the number of hypervisors +# will be the same as the number of openstack systems. However, if we are doing multinode openstack then the +# assumption is we have a single control node and the rest are compute nodes, so the number of expected hypervisors +# is one less than the total number of openstack systems +if [ ${NUM_OPENSTACK_SYSTEM} -eq 1 ]; then + expected_num_hypervisors=1 +else + expected_num_hypervisors=${NUM_OPENSTACK_COMPUTE_NODES} + if [ "$(is_openstack_feature_enabled n-cpu)" == "1" ]; then + expected_num_hypervisors=$((expected_num_hypervisors + 1)) fi +fi +num_hypervisors=$(${SSH} ${!CONTROLIP} "cd /opt/stack/devstack; source openrc admin admin; openstack hypervisor list -f value | wc -l" | tail -1 | tr -d "\r") +if ! [ "${num_hypervisors}" ] || ! [ ${num_hypervisors} -eq ${expected_num_hypervisors} ]; then + echo "Error: Only $num_hypervisors hypervisors detected, expected $expected_num_hypervisors" + exit 1 +fi + +# External Network +echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..." +# FIXME Should there be a unique gateway IP and devstack index for each site? 
+devstack_index=1 +for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do + # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack + ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE" + ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))" +done - # Gather Compute IPs for the site - for j in `seq 1 ${NUM_COMPUTES_PER_SITE}`; do - COMPUTE_INDEX=$(((i-1) * NUM_COMPUTES_PER_SITE + j)) - IP_VAR=OPENSTACK_COMPUTE_NODE_${COMPUTE_INDEX}_IP - COMPUTE_IPS[$((j-1))]=${!IP_VAR} +# ipsec support +if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then + # shellcheck disable=SC2206 + ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]}) + for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do + for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do + KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) + KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) + ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) + ip1=${ALL_NODES[$inx_ip1]} + ip2=${ALL_NODES[$inx_ip2]} + ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" + ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" + ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport" + ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport" + + ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" + ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" + ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport" + ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport" + done done - # External Network - echo "prepare external networks by adding vxlan tunnels between all nodes on a separate bridge..." - # FIXME Should there be a unique gateway IP and devstack index for each site? 
- devstack_index=1 for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do - # FIXME - Workaround, ODL (new netvirt) currently adds PUBLIC_BRIDGE as a port in br-int since it doesn't see such a bridge existing when we stack - ${SSH} $ip "sudo ovs-vsctl --if-exists del-port br-int $PUBLIC_BRIDGE" - ${SSH} $ip "sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE other-config:disable-in-band=true other_config:hwaddr=f6:00:00:ff:01:0$((devstack_index++))" + echo "ip xfrm configuration for node $ip:" + ${SSH} $ip "sudo ip xfrm policy list" + ${SSH} $ip "sudo ip xfrm state list" done +fi - # ipsec support - if [ "${IPSEC_VXLAN_TUNNELS_ENABLED}" == "yes" ]; then - # shellcheck disable=SC2206 - ALL_NODES=(${!CONTROLIP} ${COMPUTE_IPS[*]}) - for ((inx_ip1=0; inx_ip1<$((${#ALL_NODES[@]} - 1)); inx_ip1++)); do - for ((inx_ip2=$((inx_ip1 + 1)); inx_ip2<${#ALL_NODES[@]}; inx_ip2++)); do - KEY1=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) - KEY2=0x$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) - ID=0x$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) - ip1=${ALL_NODES[$inx_ip1]} - ip2=${ALL_NODES[$inx_ip2]} - ${SSH} $ip1 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" - ${SSH} $ip1 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" - ${SSH} $ip1 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir out tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport" - ${SSH} $ip1 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir in tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport" - - ${SSH} $ip2 "sudo ip xfrm state add src $ip2 dst $ip1 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" - ${SSH} $ip2 "sudo ip xfrm state add src $ip1 dst $ip2 proto esp spi $ID reqid $ID mode transport auth sha256 $KEY1 enc aes $KEY2" - ${SSH} $ip2 "sudo ip xfrm policy add src $ip2 dst $ip1 proto udp dir out tmpl src $ip2 dst $ip1 proto esp reqid $ID mode transport" - ${SSH} $ip2 "sudo ip xfrm policy add src $ip1 dst $ip2 proto udp dir in tmpl src $ip1 dst $ip2 proto esp reqid $ID mode transport" - done - done - - for ip in ${!CONTROLIP} ${COMPUTE_IPS[*]}; do - echo "ip xfrm configuration for node $ip:" - ${SSH} $ip "sudo ip xfrm policy list" - ${SSH} $ip "sudo ip xfrm state list" - done - fi - - # Control Node - PUBLIC_BRIDGE will act as the external router - # Parameter values below are used in integration/test - changing them requires updates in intergration/test as well - EXTNET_GATEWAY_IP="10.10.10.250" - EXTNET_INTERNET_IP="10.9.9.9" - EXTNET_PNF_IP="10.10.10.253" - ${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24" - - # Control Node - external net PNF simulation +# Control Node - PUBLIC_BRIDGE will act as the external router +# Parameter values below are used in integration/test - changing them requires updates in intergration/test as well +EXTNET_GATEWAY_IP="10.10.10.250" +EXTNET_INTERNET_IP="10.9.9.9" +EXTNET_PNF_IP="10.10.10.253" +${SSH} ${!CONTROLIP} "sudo ifconfig ${PUBLIC_BRIDGE} up ${EXTNET_GATEWAY_IP}/24" + +# Control Node - external net PNF simulation +${SSH} ${!CONTROLIP} " + sudo ip netns add pnf_ns; + sudo ip link add pnf_veth0 type veth peer name pnf_veth1; + sudo ip link set pnf_veth1 netns pnf_ns; + sudo ip link set pnf_veth0 up; + sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24; + sudo ovs-vsctl add-port 
${PUBLIC_BRIDGE} pnf_veth0; +" + +# Control Node - external net internet address simulation +${SSH} ${!CONTROLIP} " + sudo ip tuntap add dev internet_tap mode tap; + sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24; +" + +# Computes +compute_index=1 +for compute_ip in ${COMPUTE_IPS[*]}; do + # Tunnel from controller to compute + COMPUTEPORT=compute$(( compute_index++ ))_vxlan ${SSH} ${!CONTROLIP} " - sudo ip netns add pnf_ns; - sudo ip link add pnf_veth0 type veth peer name pnf_veth1; - sudo ip link set pnf_veth1 netns pnf_ns; - sudo ip link set pnf_veth0 up; - sudo ip netns exec pnf_ns ifconfig pnf_veth1 up ${EXTNET_PNF_IP}/24; - sudo ovs-vsctl add-port ${PUBLIC_BRIDGE} pnf_veth0; + sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow " - - # Control Node - external net internet address simulation - ${SSH} ${!CONTROLIP} " - sudo ip tuntap add dev internet_tap mode tap; - sudo ifconfig internet_tap up ${EXTNET_INTERNET_IP}/24; + # Tunnel from compute to controller + CONTROLPORT="control_vxlan" + ${SSH} $compute_ip " + sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow " - - # Computes - compute_index=1 - for compute_ip in ${COMPUTE_IPS[*]}; do - # Tunnel from controller to compute - COMPUTEPORT=compute$(( compute_index++ ))_vxlan - ${SSH} ${!CONTROLIP} " - sudo ovs-vsctl add-port $PUBLIC_BRIDGE $COMPUTEPORT -- set interface $COMPUTEPORT type=vxlan options:local_ip=${!CONTROLIP} options:remote_ip=$compute_ip options:dst_port=9876 options:key=flow - " - # Tunnel from compute to controller - CONTROLPORT="control_vxlan" - ${SSH} $compute_ip " - sudo ovs-vsctl add-port $PUBLIC_BRIDGE $CONTROLPORT -- set interface $CONTROLPORT type=vxlan options:local_ip=$compute_ip options:remote_ip=${!CONTROLIP} options:dst_port=9876 options:key=flow - " - done done if [ "${ENABLE_HAPROXY_FOR_NEUTRON}" == "yes" ]; then @@ -1125,7 +1169,6 @@ for suite in ${SUITES}; do -v JENKINS_WORKSPACE:${WORKSPACE} \ -v NEXUSURL_PREFIX:${NEXUSURL_PREFIX} \ -v NUM_ODL_SYSTEM:${NUM_ODL_SYSTEM} \ - -v NUM_OPENSTACK_SITES:${NUM_OPENSTACK_SITES} \ -v NUM_OS_SYSTEM:${NUM_OPENSTACK_SYSTEM} \ -v NUM_TOOLS_SYSTEM:${NUM_TOOLS_SYSTEM} \ -v ODL_SNAT_MODE:${ODL_SNAT_MODE} \
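
The new TMPFS_SIZE variable sizes the tmpfs that get_devstack.sh mounts at /opt/stack on every control and compute node: 2G by default, raised to 12G when ENABLE_OS_PLUGINS contains networking-sfc so Ubuntu instance images fit. A minimal sketch of the mount and a quick size check (the df line is only an illustrative verification, not part of the job):

    sudo mount -t tmpfs -o size=${TMPFS_SIZE} tmpfs /opt/stack   # same mount the get_devstack.sh heredoc performs
    df -h /opt/stack                                             # confirm which size actually took effect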
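
The rewritten HAProxy block drops the old source-balanced 8080/8181/8185 listeners in favour of an HTTP-mode listener on 8181 that health-checks each ODL member with "option httpchk GET /diagstatus", plus a separate 8185 websocket listener with a long tunnel timeout. A minimal sketch of probing that same endpoint by hand, assuming the default admin/admin credentials set at the top of this script (the target IP variable is illustrative):

    # Hypothetical manual probe of the endpoint the health check uses.
    curl -u admin:admin -s -o /dev/null -w '%{http_code}\n' "http://${ODL_SYSTEM_1_IP}:8181/diagstatus"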
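
The new install_ovs() helper dispatches on the shape of OVS_INSTALL: a value starting with "v" is treated as an OVS git tag to build into RPMs once (the first node's RPMs under the given path are reused for later nodes), while a value starting with "http" is treated as a ready-made RPM repository. A sketch of the two paths, with placeholder tag and URL values that are not taken from this job:

    OVS_INSTALL="v2.9.2"                        # placeholder tag: build OVS from git, then install the RPMs
    install_ovs "${OPENSTACK_CONTROL_NODE_1_IP}" /tmp/ovs_rpms
    OVS_INSTALL="http://example.com/ovs-repo"   # placeholder URL: install straight from a yum repository
    install_ovs "${OPENSTACK_COMPUTE_NODE_1_IP}" /tmp/ovs_rpms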
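
For the external network, the control node gets one VXLAN port per compute (compute<N>_vxlan) and each compute gets a control_vxlan port back, all on ${PUBLIC_BRIDGE} with dst_port=9876 and key=flow. A quick way to eyeball the result on the control node after stacking (a sketch for debugging, not part of the job itself):

    sudo ovs-vsctl show | grep -A 3 compute1_vxlan       # tunnel port from the control node to compute 1
    sudo ovs-vsctl get Interface compute1_vxlan options  # expect local_ip, remote_ip, dst_port=9876, key=flow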