- Removed policy definition from within odl_gbp and pushed it out to a separate policy_config file. Also made the necessary
changes to odl_gbp and testOfOverlay to facilitate that and the removal of mininet
- Added helper scripts.
Signed-off-by: Keith Burns (alagalah) <alagalah@gmail.com>
--- /dev/null
+GroupBasedPolicy Proof Of Concept Scripts
+
+1. Introduction
+
+This GroupBasedPolicy "Proof of Concept" (demo) uses:
+- Two Ubuntu 14.04 LTS VMs on a host-only network (using VirtualBox)
+- Docker 1.0.1 (for the guests)
+- OpenVSwitch 2.3.0 (running natively on Ubuntu 14.04 LTS)
+
+It mimics the same policy as the mininet example, that is:
+- Two EndPointGroups (EPG)
+ 1. Clients
+ 2. WebServers
+- 4 guests per EPG
+- Contract allowing HTTP Client -> WebServers and PING Client <-> WebServers, but
+ disallowing HTTP WebServers -> Client
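+
+ Once the demo is up, the policy can be sanity-checked from inside the guests
+ (attach with e.g. "docker attach h35_2"; guest names follow
+ infrastructure_config.py, and the HTTP checks assume the webserver guests
+ listen on TCP port 80 and that curl is available in the image):
+   ping 10.0.36.2          # from a client: ICMP is allowed both ways
+   curl http://10.0.36.2/  # from a client: HTTP Client -> WebServer allowed
+   curl http://10.0.35.2/  # from a webserver: HTTP WebServer -> Client blocked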
+
+2. Files
+ - infrastructure_config:
+ Contains the configuration of OVS and the docker guests. There is a default container image, but this can be overridden per host.
+ - policy_config:
+ The policy is set here. Note that this file must be present on both VMs.
+ - infrastructure_launch:
+ Launches the docker containers and configures the switches.
+ - odl_gbp:
+ Library of functions for performing operations on GBP policy entities.
+ - testOfOverlay:
+ Processes the policy and guests. Exactly one of the VMs must launch this script with "--policy", much like the mininet POC.
+ - start-poc.sh:
+ Cleans up any existing docker and OVS instances (using "mn -c" for a quick OVS clean-up; if that is not available on your VM, stop OVS, remove conf.db, and restart OVS).
+
+3. Usage
+ - Always run as root, e.g. via "sudo bash".
+ - Edit infrastructure_config.py with the IP address of the VM for each switch, and edit start-poc.sh with your ODL controller IP.
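+
+ For example, a typical two-VM run might look like this (IPs, switch names and
+ flags are illustrative; check start-poc.sh and "./testOfOverlay.py --help" for
+ the exact invocation):
+   VM1 (policy master): sudo bash; then ./testOfOverlay.py --local s1 --policy --controller 192.168.56.1
+   VM2: sudo bash; then ./testOfOverlay.py --local s2 --controller 192.168.56.1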
+
+
+++ /dev/null
-
-
-TENANT="f5c7d344-d1c7-4208-8531-2c2693657e12"
-L3CTX="f2311f52-890f-4095-8b85-485ec8b92b3c"
-L2BD="70aeb9ea-4ca1-4fb9-9780-22b04b84a0d6"
-
-L2FD1="252fbac6-bb6e-4d16-808d-6f56d20e5cca"
-EG1="1eaf9a67-a171-42a8-9282-71cf702f61dd"
-L2FD2="cb5249bb-e896-45be-899d-4cdd9354b58e"
-EG2="e593f05d-96be-47ad-acd5-ba81465680d5"
-
-CONTRACT="22282cca-9a13-4d0c-a67e-a933ebb0b0ae"
-
-# Config for switches, tunnelIP is the local IP address.
-switches = [{'name': 's1',
- 'tunnelIp': '192.168.56.30',
- 'dpid': '1'},
- {'name': 's2',
- 'tunnelIp': '192.168.56.32',
- 'dpid': '2'}]
-
-defaultContainerImage='alagalah/odlpoc_ovs230'
-
-hosts = [{'name': 'h35_2',
- 'mac': '00:00:00:00:35:02',
- 'ip': '10.0.35.2/24',
- 'switch': 's1',
- 'tenant': TENANT,
- 'endpointGroup': EG1},
- {'name': 'h35_3',
- 'ip': '10.0.35.3/24',
- 'mac': '00:00:00:00:35:03',
- 'switch': 's1',
- 'tenant': TENANT,
- 'endpointGroup': EG1},
- {'name': 'h35_4',
- 'ip': '10.0.35.4/24',
- 'mac': '00:00:00:00:35:04',
- 'switch': 's2',
- 'tenant': TENANT,
- 'endpointGroup': EG1},
- {'name': 'h35_5',
- 'ip': '10.0.35.5/24',
- 'mac': '00:00:00:00:35:05',
- 'switch': 's2',
- 'tenant': TENANT,
- 'endpointGroup': EG1},
- {'name': 'h36_2',
- 'ip': '10.0.36.2/24',
- 'mac': '00:00:00:00:36:02',
- 'switch': 's1',
- 'tenant': TENANT,
- 'endpointGroup': EG2},
- {'name': 'h36_3',
- 'ip': '10.0.36.3/24',
- 'mac': '00:00:00:00:36:03',
- 'switch': 's1',
- 'tenant': TENANT,
- 'endpointGroup': EG2},
- {'name': 'h36_4',
- 'ip': '10.0.36.4/24',
- 'mac': '00:00:00:00:36:04',
- 'switch': 's2',
- 'tenant': TENANT,
- 'endpointGroup': EG2},
- {'name': 'h36_5',
- 'ip': '10.0.36.5/24',
- 'mac': '00:00:00:00:36:05',
- 'switch': 's2',
- 'tenant': TENANT,
- 'endpointGroup': EG2}]
-
-contracts = [{'consumer': EG1,
- 'provider': EG2,
- 'id': CONTRACT}]
docker stop -t=1 $(docker ps -a -q)
docker rm $(docker ps -a -q)
+mn -c
-#===============================================================================
-# Containers are created from the config below. Basic structure is:
-# Container
-# - basic container information such as it's IP it uses to talk to host etc
-# - host ports: these are EPs in the policy_config.py file. Processes such as
-# a simple socket server, HTTPD, database etc can be run in the container.
-# - tunnel ports are what make up the topology, currently the remote_ip points
-# to the docker container IP_address
-#===============================================================================
+# Config for switches; tunnelIp is the local IP address of the hosting VM.
+switches = [{'name': 's1',
+ 'tunnelIp': '192.168.56.30',
+ 'dpid': '1'},
+ {'name': 's2',
+ 'tunnelIp': '192.168.56.32',
+ 'dpid': '2'}]
-OPENDAYLIGHT_CONTROLLER_IP="192.168.56.1"
-OPENDAYLIGHT_CONTROLLER_PORT=6653
+defaultContainerImage='alagalah/odlpoc_ovs230'
-#TODO: modify script to leverage pipework for multiple host bridges/remote systems, as well as making 172.17.0.0/16 configurable
-#TODO: change remote IP to point to another container by container "name" and resolve it's IP Address
-#TODO: Write a GUI that can instantiate these values.
-#TODO: Change script to automatically pull image from docker repo.
+# Note that the tenant name and endpointGroup name for each host come from policy_config.py
+
+hosts = [{'name': 'h35_2',
+ 'mac': '00:00:00:00:35:02',
+ 'ip': '10.0.35.2/24',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h35_3',
+ 'ip': '10.0.35.3/24',
+ 'mac': '00:00:00:00:35:03',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h35_4',
+ 'ip': '10.0.35.4/24',
+ 'mac': '00:00:00:00:35:04',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h35_5',
+ 'ip': '10.0.35.5/24',
+ 'mac': '00:00:00:00:35:05',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h36_2',
+ 'ip': '10.0.36.2/24',
+ 'mac': '00:00:00:00:36:02',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_3',
+ 'ip': '10.0.36.3/24',
+ 'mac': '00:00:00:00:36:03',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_4',
+ 'ip': '10.0.36.4/24',
+ 'mac': '00:00:00:00:36:04',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_5',
+ 'ip': '10.0.36.5/24',
+ 'mac': '00:00:00:00:36:05',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'}]
-containers = [{
- "name" : "s1", #synonymous with switch name
- "image" : "alagalah/odlpoc_ovs230",
- "ip_address" : "172.17.0.101", # IP address of the switch and relies on docker default of 172.17.0.0/16.
- "ip_mask" : "255.255.0.0",
- "host_interface_mac" : "00:00:00:fa:bb:01",
- "dpid" : "0000000000000001", # Must be 16 "bytes" long
- "host_ports" : [
- {"port_name" : "p1", # synonymous with EP
- "port_ip" : "10.1.1.11", # synonymous with EP
- "port_ip_mask" :"255.255.0.0",
- "port_mac" : "de:ad:10:01:01:11",
- "vlan" : None}
-# {"port_name" : "p2", # synonymous with EP
-# "port_ip" : "30.1.1.11", # synonymous with EP
-# "port_ip_mask" :"255.255.0.0",
-# "port_mac" : "de:ad:30:01:01:11",
-# "vlan" : None}
- ],
- "tunnels" : [
- {"type" :"vxlan", #only vxlan supported at moment, may look into ivxlan and GRE later
- "port_name" : "s1_vxlan1", #"vxlan1" is just a string and can be anything you like
- "key" :"flow", #allows us to overload VXLAN tunnel using VNIs if needed
- "openflow_port" : "10", #creates an OF port in datapath to map flows to tunnel
- "remote_ip" : "172.17.0.103" #Optional... TODO firx it.
- }
- ]
- },
- {"name" : "s2",
- "image" : "alagalah/odlpoc_ovs230",
- "ip_address" : "172.17.0.102",
- "ip_mask" : "255.255.0.0",
- "host_interface_mac" : "00:00:00:fa:bb:02",
- "dpid" : "0000000000000002",
- "host_ports" : [
- {"port_name" : "p1",
- "port_ip" : "20.1.1.11",
- "port_ip_mask" :"255.255.0.0",
- "port_mac" : "de:ad:20:01:01:11",
- "vlan" : None}
-# {"port_name" : "p2",
-# "port_ip" : "20.1.1.12",
-# "port_ip_mask" :"255.255.0.0",
-# "port_mac" : "de:ad:20:01:01:12",
-# "vlan" : None}
- ],
- "tunnels" : [
- {"type" :"vxlan",
- "port_name" : "s2_vxlan1",
- "key" :"flow",
- "openflow_port" : "10",
- "remote_ip" : "172.17.0.101"
- }
- ]
- }] #end containers
+#!/usr/bin/python
+
import re
import time
import sys
import ipaddr
from subprocess import call
from subprocess import check_output
-from config import *
+from infrastructure_config import *
def addController(sw, ip):
call(['ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip ])
print 'DPID: %s is too long' % dpid
sys.exit(3)
call(['ovs-vsctl','set','bridge', name,'other-config:datapath-id=%s'%dpid])
-# return switch
def addHost(net, switch, name, ip, mac):
containerID=launchContainer()
containerImage=host['container_image']
containerID=launchContainer(host,containerImage)
connectContainerToSwitch(sw['name'],host,containerID,str(ports))
- host['port']='openflow%s:%s' %(str(sw['dpid']),str(ports)) # alagalah - this is such a horrible hack TODO: Find a more elegant way
+ host['port']=str(ports) # alagalah - this is such a horrible hack TODO: Find a more elegant way
+
- # ODL is very fragile so let's give it some time
-# time.sleep(1)
# This is a workaround for a bug encountered during
# the Helium release. Setting the vSwitch from 1.0
# to 1.3 while it was connected to the controller
addController(sw['name'], contIP)
return dpid
-# '''
-# Created on Oct 16, 2014
-#
-# @author: alagalah
-# '''
-#
-# import infrastructure_config
-# import os
-# import sys
-# import argparse
-# import io
-# import subprocess
-# import ipaddr
-#
-#
-# if __name__ == '__main__':
-# '''
-# Usage:
-# 1. Assumes well formed infrastructure_config.py file to build containers etc
-# 2. If used with --dockerfiles_only True, only the startup scripts and docker files will
-# be created, without building an image or launching the containers.
-# 3. If used with --directory {directory} creates all files in directory
-# '''
-# DEBUG=False
-#
-# #Check for parameters to see if --dockerfiles_only True is passed
-# parser = argparse.ArgumentParser()
-# parser.add_argument('--dockerfiles_only', help='Does not launch containers if set to True',default=False)
-# parser.add_argument('--directory', help='Base directory to create dockerfiles and shellscripts',default='.')
-# args = parser.parse_args()
-#
-# LAUNCH=True
-# if (args.dockerfiles_only):
-# LAUNCH=False
-#
-# if (args.directory):
-# os.chdir(args.directory)
-# print 'Working directory: ',os.getcwd()
-#
-# WORKING_DIRECTORY=os.getcwd()
-#
-# #===========================================================================
-# # For each container, the following steps are executed:
-# # 1. Create a shell script locally that will run inside each container.
-# # It is called 'startup-{switchname}.sh that:
-# # - Stops, cleans, starts OVS
-# # - Executes ovs-vsctl commands to create ports and tunnels
-# # - Assigns IP addresses to container interfaces
-# # - Fires off a bash script
-# # 2. Create a Dockerfile that:
-# # - Leverages the base image from infrastructure_config (FROM:)
-# # - ADD startup-{switchname}.sh which copies file from local to container
-# # - RUN chmod +x startup-{switchname}.sh
-# # - CMD ./startup-{switchname}.sh
-# # 3. Build a docker image called {switchname} using the Dockerfile-{switchname}.
-# # 4. Run the docker image with flags '-i -t -d --privileged=True'
-# #
-# #===========================================================================
-# docker_commands=[]
-# for container in infrastructure_config.containers:
-# docker_commands_container=[]
-# DOCKERIMAGE_NAME=str(container['name']).lower()
-# SCRIPT_NAME='startup-'+DOCKERIMAGE_NAME+'.sh'
-# #DOCKERFILE_NAME='Dockerfile-'+DOCKERIMAGE_NAME
-# DOCKERFILE_NAME='Dockerfile'
-#
-# if DEBUG: print "DEBUG: Processing container ",DOCKERIMAGE_NAME, SCRIPT_NAME, DOCKERIMAGE_NAME
-#
-# # Create shell script file to execute following
-# shell_lines=[]
-#
-# shell_lines.append(" ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach")
-# shell_lines.append("ovs-vswitchd --pidfile --detach")
-# # shell_lines.append('service openvswitch-switch stop') #Kill OVS process if running (shouldn't be)
-# # shell_lines.append('rm /etc/openvswitch/conf.db') #Remove any config hanging around
-# # shell_lines.append('service openvswitch-switch start') #Restart all fresh and squeaky clean
-# shell_lines.append('/bin/sh export PS1="'+DOCKERIMAGE_NAME+'"') #Set the prompt to the switchname for usability
-# shell_lines.append('ovs-vsctl add-br '+DOCKERIMAGE_NAME) #Add the bridge
-# shell_lines.append('ovs-vsctl set bridge %s other-config:datapath-id=%s' % (DOCKERIMAGE_NAME,container["dpid"])) #Set DPID
-# # shell_lines.append('ovs-vsctl set bridge %s protocols="OpenFlow13,OpenFlow10"' % DOCKERIMAGE_NAME) #Set OF13
-# # shell_lines.append('ovs-vsctl set bridge %s datapath_type=netdev '+DOCKERIMAGE_NAME) #Alagalah - experimental
-# shell_lines.append('ovs-vsctl set bridge %s protocols="OpenFlow13"' % DOCKERIMAGE_NAME) #Set OF13
-#
-#
-# # Adding internal ports
-# for hostport in container["host_ports"]:
-# if DEBUG: print "DEBUG: Processing port: ",hostport["port_name"],hostport.keys()
-#
-#
-# port_name=str(hostport["port_name"])
-# port_ip=str(hostport["port_ip"])
-# port_ip_mask=str(hostport["port_ip_mask"])
-# port_mac=str(hostport["port_mac"])
-#
-# shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=internal' % (DOCKERIMAGE_NAME, port_name, port_name)) #Add hostport to switch as internal hostport
-# shell_lines.append('ifconfig '+port_name+' down') #Down it
-# shell_lines.append('ifconfig '+port_name+' hw ether '+port_mac) #Set the MAC address
-# shell_lines.append('ifconfig '+port_name+' '+port_ip+' netmask '+port_ip_mask) #Set the IP address
-# shell_lines.append('ifconfig '+port_name+' up') #Up it
-#
-# #Reset the docker default interface address which is Eth0
-# #TODO: Perhaps look to add pipeworks functionality of a bridge/hostport just for POC/Demos
-# shell_lines.append('ifconfig eth0 down') #Down it
-# shell_lines.append('ifconfig eth0 hw ether '+str(container["host_interface_mac"])) #Set the MAC address
-# shell_lines.append('ifconfig eth0 ' + str(container["ip_address"])+' netmask '+str(container["ip_mask"])) #Set the IP address
-# shell_lines.append('ifconfig eth0 up') #Up it
-#
-#
-# # Adding tunnel ports
-# for tunnel in container["tunnels"]:
-# tunnel_type=tunnel["type"] #only supported at moment, may look into ivxlan and GRE later
-# port_name=tunnel["port_name"]
-# key=tunnel["key"] #allows us to overload VXLAN tunnel using VNIs if needed if using flow
-# remote_ip=tunnel["remote_ip"]
-# openflow_port=tunnel["openflow_port"] #creates an OF port in datapath to map flows to tunnel
-#
-# # #Set using remote tunnel destination
-# # shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=%s option:remote_ip=%s option:key=%s ofport_request=%s' %
-# # (DOCKERIMAGE_NAME,port_name,port_name,tunnel_type,remote_ip,key,openflow_port))
-#
-# # Setting setting source tunnel only, no OFPORT REQUEST
-# shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=%s option:remote_ip=flow option:key=%s option:source_ip=%s' %
-# (DOCKERIMAGE_NAME,port_name,port_name,tunnel_type,key,container["ip_address"]))
-#
-# #####################
-# # WARNING! THIS IS A BIT HACKY! UNTIL CONFIG CHANGES TO USE PIPEWORKS THIS SHOULD BE OK --- REALLY RELIES ON SINGLE PORT!!! OR LAST PORT SET!
-# shell_lines.append('ip route add %s via 172.17.42.1' % infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP)
-# nw = ipaddr.IPv4Network(port_ip+"/"+port_ip_mask)
-# snet = "{}/{}".format(nw.network + 1, nw.prefixlen)
-# router = "{}".format(nw.network + 1)
-# shell_lines.append('ip route default via %s' % router) #alagalah HEINOUS
-#
-# # END WARNING
-# #####################
-# # Want to register with the controller last.
-# shell_lines.append('ovs-vsctl set-controller %s tcp:%s:%s' % (DOCKERIMAGE_NAME,infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP,infrastructure_config.OPENDAYLIGHT_CONTROLLER_PORT)) #Set the CONTROLLER
-# # shell_lines.append('ovs-vsctl set-manager %s tcp:%s:%s' % (DOCKERIMAGE_NAME,infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP,"6640")) #Set the CONTROLLER
-#
-# shell_lines.append('/bin/bash') #Leave a bash shell running else it dies... could also do "tail /dev/null"
-#
-# #Create the shell script
-# #These scripts only work correctly if docker has its own folder per container
-# directory=os.path.join(WORKING_DIRECTORY,DOCKERIMAGE_NAME)
-# if not os.path.exists(directory):
-# os.makedirs(directory)
-# os.chdir(directory)
-# with open(SCRIPT_NAME, 'w') as f:
-# for s in shell_lines:
-# f.write(s + '\n')
-# print "Created script ",SCRIPT_NAME
-#
-# #===============================================================================
-# # Step 1 COMPLETE, NOW FOR Step 2 - Creating the Docker file
-# #===============================================================================
-#
-# dockerfile_lines=[]
-#
-# dockerfile_lines.append("FROM %s" % container["image"])
-# dockerfile_lines.append("ADD %s /" % SCRIPT_NAME)
-# dockerfile_lines.append("RUN chmod +x %s" % SCRIPT_NAME)
-# dockerfile_lines.append("CMD ./%s" % SCRIPT_NAME)
-#
-# #Create the Dockerfile
-# with open(DOCKERFILE_NAME, 'w') as f:
-# for s in dockerfile_lines:
-# f.write(s + '\n')
-# print "Created docker file ",DOCKERFILE_NAME
-#
-# #=======================================================================
-# # Steps 4 & 5 create the docker CLI commands to BUILD (using the Dockerfile)
-# # and RUN the image, which automatically runs the startup-{switch}.sh script
-# #=======================================================================
-# docker_commands_container.append(os.getcwd()) # This is how we know what directory to go to
-# docker_commands_container.append('sudo docker build -t %s .' % DOCKERIMAGE_NAME)
-# docker_commands_container.append('sudo docker run -t -i -d --privileged=true --name=%s %s' % (DOCKERIMAGE_NAME,DOCKERIMAGE_NAME))
-# docker_commands.append(docker_commands_container)
-#
-# # Only execute docker launch commands if --dockerfiles_only is NOT set to True
-# if LAUNCH:
-# for command in docker_commands:
-# print command
-# os.chdir(command[0])
-# print "Changed directory to ",os.getcwd()
-# os.system(command[1])
-# os.system(command[2])
+
+++ /dev/null
-
-from mininet.topo import Topo
-from mininet.node import RemoteController
-from mininet.net import Mininet
-from mininet.util import dumpNodeConnections
-from mininet.log import setLogLevel
-from mininet.node import Node
-
-import re
-import time
-from subprocess import call
-from subprocess import check_output
-
-def addController(sw, ip):
- call(['ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip ])
-
-def addSwitch(net, name, dpid=None):
- switch = net.addSwitch(name, dpid=dpid)
- return switch
-
-def addHost(net, switch, name, ip, mac):
- host = net.addHost(name, ip=ip, mac=mac)
- net.addLink(host, switch)
-
-def setOFVersion(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
- call(['ovs-vsctl', 'set', 'bridge', sw, 'protocols={}'.format(version)])
-
-def addTunnel(sw, sourceIp=None):
- ifaceName = '{}_vxlan0'.format(sw)
- cmd = ['ovs-vsctl', 'add-port', sw, ifaceName,
- '--', 'set', 'Interface', ifaceName,
- 'type=vxlan',
- 'options:remote_ip=flow',
- 'options:key=flow']
- if sourceIp is not None:
- cmd.append('options:source_ip={}'.format(sourceIp))
- call(cmd)
-
-#ovs-ofctl dump-ports-desc s1 -OOpenFlow13
-
-def startMininet(switches, hosts, contIP='127.0.0.1'):
- setLogLevel('info')
-
- net = Mininet(controller=None,
- autoSetMacs=True,
- listenPort=6634)
-
- swobjs = {}
- swports = {}
-
- for sw in switches:
- swobj = addSwitch(net, sw['name'])
- swobjs[sw['name']] = swobj
- swports[sw['name']] = 0;
- for host in hosts:
- if host['switch'] not in swobjs:
- continue
- sw = swobjs[host['switch']]
- swports[host['switch']] += 1;
- port = swports[host['switch']]
- addHost(net, sw, host['name'], host['ip'], host['mac'])
- host['port'] = port
-
- try:
- net.start()
- for sw in switches:
- addTunnel(sw['name'], sw['tunnelIp'])
-
- for host in net.hosts:
- gw = re.sub(r'.\d+$', ".1", host.IP())
- host.cmd('route add default gw {}'.format(gw))
-
- # ODL is very fragile so let's give it some time
- time.sleep(1)
-
- # This is a workaround for a bug encountered during
- # the Helium release. Setting the vSwitch from 1.0
- # to 1.3 while it was connected to the controller
- # exposed a bug in the openflowplugin, which resulted
- # in the controller missing some of the ports on the
- # vswitch. This change avoids the bug by switching
- # the version before connecting the switch to the
- # controller.
- for sw in switches:
- setOFVersion(sw['name'])
- addController(sw['name'], contIP)
-
- return net
- except Exception, e:
- net.stop()
- raise e
import requests,json
+import uuid
from requests.auth import HTTPBasicAuth
USERNAME='admin'
tenants = {}
-# This is where some of the policy is set, subject and classifiers
+def initialize_tenant(tenant):
+    # All tenants must have a unique ID
+ if not tenant.has_key('id'):
+ print "No ID, initializing"
+ tenant['id']=str(uuid.uuid4())
+
+ # If the tenant has already been initialised, we must assume that the stored copy in
+ # tenants dict is more up to date.
+ if tenant['id'] in tenants:
+ return tenants[tenant['id']]
+
+ # Dictionary items that must exist
+ data = {
+ "l3-context": [],
+ "l2-bridge-domain": [],
+ "l2-flood-domain": [],
+ "subnet": [],
+ "endpoint-group": [],
+ "contract": [],
+ "subject-feature-instances": {}
+ }
+
+    # Merge the defaults in data with the passed tenant dictionary; because the tenant
+    # items come second, they override the defaults (reverse the order if the opposite is preferred)
+ mergedData = dict(data.items() + tenant.items())
+ tenants[mergedData['id']] = mergedData
+ return mergedData
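+
+# Usage sketch (illustrative): a bare tenant dict gains a generated id and the
+# required empty keys, while any keys the caller supplies win over the defaults:
+#   t = initialize_tenant({'name': 'GBPPOC'})
+#   assert 'id' in t and t['contract'] == []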
+
def get_tenant(tenantId):
if tenantId in tenants:
return tenants[tenantId]
"subnet": [],
"endpoint-group": [],
"contract": [],
- "subject-feature-instances": {
- "classifier-instance": [
- {"name": "http-dest",
- "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
- "parameter-value": [
- {"name": "type",
- "string-value": "TCP"},
- {"name": "destport",
- "int-value": "80"}
- ]},
- {"name": "http-src",
- "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
- "parameter-value": [
- {"name": "type",
- "string-value": "TCP"},
- {"name": "sourceport",
- "int-value": "80"}
- ]},
- {"name": "icmp",
- "classifier-definition-id": "79c6fdb2-1e1a-4832-af57-c65baf5c2335",
- "parameter-value": [
- {"name": "proto",
- "int-value": "1"}
- ]},
- ]
- }
+ "subject-feature-instances": {}
}
tenants[tenantId] = data
return data
endpoints = []
-def get_ep(tenantId, groupId, l3ctx, ip, l2ctx, mac, sw,port):
+def get_ep(tenantId, groupId, l3ctx, ip, l2ctx, mac, sw, port):
group = get_epg(tenantId, groupId)
data = {"tenant": tenantId,
"endpoint-group": groupId,
"l3-address": [{"l3-context": l3ctx,
"ip-address": ip}],
"ofoverlay:node-id": "openflow:{}".format(sw),
- "ofoverlay:node-connector-id": "{}".format(port)
+ "ofoverlay:node-connector-id": "openflow:{}:{}".format(sw, port)
}
endpoints.append(data)
return data
nodes.append(data)
return data
-# This is where specifics of the contract are defined. Note: Classifiers are SET in the get_tenant procedure.
-def get_contract(tenantId, pgroupId, cgroupId, contractId):
+def get_contract(tenantId, pgroupIds, cgroupIds, contract):
+# TODO: This assumes a single provider/consumer EPG per contract. It should be able to
+# process the full lists, but it is not entirely clear whether everything should be
+# repeated per pair or just the IDs. For now, only the first element of each list is used.
tenant = get_tenant(tenantId)
- pgroup = get_epg(tenantId, pgroupId)
- cgroup = get_epg(tenantId, cgroupId)
- data = {
- "id": contractId,
- "subject": [{"name": "allow-http-subject",
- "rule": [
- {"name": "allow-http-rule",
- "classifier-ref": [
- {"name": "http-dest",
- "direction": "in"},
- {"name": "http-src",
- "direction": "out"}
- ]}
- ]},
- {"name": "allow-icmp-subject",
- "rule": [
- {"name": "allow-icmp-rule",
- "classifier-ref": [
- {"name": "icmp"}
- ]}
- ]}],
- "clause": [{"name": "allow-http-clause",
- "subject-refs": ["allow-http-subject",
- "allow-icmp-subject"]}]
- }
+ pgroup = get_epg(tenantId, pgroupIds[0])
+ cgroup = get_epg(tenantId, cgroupIds[0])
+
+ if not contract.has_key('id'):
+ contract['id']=str(uuid.uuid4())
+    # The tenant's contract construct has no notion of "name", so work on a copy of the
+    # contract dict with the name removed
+ data=dict(contract)
+ del data['name']
+
tenant["contract"].append(data)
cgroup["consumer-named-selector"].append({
- "name": "{}-{}-{}".format(pgroupId, cgroupId, contractId),
- "contract": [contractId]
+ "name": "{}-{}-{}".format(pgroupIds[0], cgroupIds[0], data['id']),
+ "contract": [data['id']]
})
pgroup["provider-named-selector"].append({
- "name": "{}-{}-{}".format(pgroupId, cgroupId, contractId),
- "contract": [contractId]
+ "name": "{}-{}-{}".format(pgroupIds[0], cgroupIds[0], data['id']),
+ "contract": [data['id']]
})
return data
print r.text
r.raise_for_status()
-def get(url):
-# headers = {'Content-type': 'application/yang.data+json',
-# 'Accept': 'application/yang.data+json'}
- print "GET %s" % url
-# r = requests.get(url, headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
- r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
- r.raise_for_status()
- return r.json()
-
-def get_operational_nodes_data(contHost):
- return get(OPERATIONAL_NODES_URL % contHost)
-
def register_tenants(contHost):
data = {"policy:tenants": {"tenant": tenants.values()}}
put(REGISTER_TENANTS_URL % contHost, data)
def register_eps(contHost):
for ep in endpoints:
- data = {"input": ep}
- post(REGISTER_EP_URL % contHost, data)
+ data = {"input": ep}
+ post(REGISTER_EP_URL % contHost, data)
def register_nodes(contHost):
data = {"opendaylight-inventory:nodes": {"node": nodes}}
--- /dev/null
+L3CTX='cbe0cc07-b8ff-451d-8171-9eef002a8e80'
+L2BD='7b796915-adf4-4356-b5ca-de005ac410c1'
+# Only one tenant supported at this time.
+tenants = [
+ {'name':'GBPPOC',
+ 'id':'f5c7d344-d1c7-4208-8531-2c2693657e12', #Optional, if you leave this out will be generated
+ 'subject-feature-instances':
+ {'classifier-instance':
+ [
+ {'name': 'http-dest',
+ 'classifier-definition-id': '4250ab32-e8b8-445a-aebb-e1bd2cdd291f',
+ 'parameter-value': [
+ {'name': 'type',
+ 'string-value': 'TCP'},
+ {'name': 'destport',
+ 'int-value': '80'}
+ ]},
+ {'name': 'http-src',
+ 'classifier-definition-id': '4250ab32-e8b8-445a-aebb-e1bd2cdd291f',
+ 'parameter-value': [
+ {'name': 'type',
+ 'string-value': 'TCP'},
+ {'name': 'sourceport',
+ 'int-value': '80'}
+ ]},
+ {'name': 'icmp',
+ 'classifier-definition-id': '79c6fdb2-1e1a-4832-af57-c65baf5c2335',
+ 'parameter-value': [
+ {'name': 'proto',
+ 'int-value': '1'}
+ ]
+ }
+ ]
+ }
+ }
+ ]
+
+contracts = [
+ {'name':'pingall+web',
+ 'id':'22282cca-9a13-4d0c-a67e-a933ebb0b0ae',
+ 'subject': [
+ {'name': 'allow-http-subject',
+ 'rule': [
+ {'name': 'allow-http-rule',
+ 'classifier-ref': [
+ {'name': 'http-dest',
+ 'direction': 'in'},
+ {'name': 'http-src',
+ 'direction': 'out'}
+ ]
+ }
+ ]
+ },
+ {'name': 'allow-icmp-subject',
+ 'rule': [
+ {'name': 'allow-icmp-rule',
+ 'classifier-ref': [
+ {'name': 'icmp'}
+ ]}
+ ]
+ }],
+ 'clause': [
+ {'name': 'allow-http-clause',
+ 'subject-refs': [
+ 'allow-http-subject',
+ 'allow-icmp-subject'
+ ]
+ }
+ ]
+ }]
+endpointGroups = [
+ {'name':'client',
+ 'providesContracts' : [], #List of contract names provided
+ 'consumesContracts' : ['pingall+web'],
+ 'id' : '1eaf9a67-a171-42a8-9282-71cf702f61dd', #Optional, if you leave this out will be generated
+ },
+ {'name':'webserver',
+ 'providesContracts' : ['pingall+web'], #List of contract names provided
+ 'consumesContracts' : [],
+ 'id' : 'e593f05d-96be-47ad-acd5-ba81465680d5', #Optional, if you leave this out will be generated
+ }
+ ]
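+
+# To add another EPG, append a dict like those above and reference an existing
+# contract name in providesContracts/consumesContracts, e.g. (illustrative only):
+#   endpointGroups.append({'name': 'db',
+#                          'providesContracts': [],
+#                          'consumesContracts': ['pingall+web']})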
+
+
+
echo
echo "*** Removing containers... "
echo
-sudo ./docker-clean.sh
+./docker-clean.sh
echo
echo "*** Cleaning up OVS... "
-sudo mn -c
+mn -c
+echo
+echo "Pulling alagalah/odlpoc_ovs230 docker image...edit script for own images"
+echo
+docker pull alagalah/odlpoc_ovs230
echo
echo "Running POC script"
echo
-sudo ./testOfOverlay.py --local s1 --controller ${CONTROLLER}
+./testOfOverlay.py --local s1 --controller ${CONTROLLER}
import infrastructure_launch
import odl_gbp
-#import mininet.cli
import ipaddr
import uuid
import re
import argparse, sys
-from config import *
+import policy_config
+import infrastructure_config
def getSubnet(ip):
nw = ipaddr.IPv4Network(ip)
parser.print_help()
sys.exit(3)
- # switches is a list from config.py, when this script is called with --local (switch) and its present in config, it is added to the conf_switches
+    # switches is a list from infrastructure_config.py; these are the OVS instances
conf_switches = []
if args.local:
- for switch in switches:
+ for switch in infrastructure_config.switches:
if switch['name'] == args.local:
conf_switches = [switch]
break
- # Assuming we have switches defined (and hence conf_switches), start mininet with the "hosts" list also from config.py
- net = None
+ # Assuming we have switches defined (and hence conf_switches), start containers with the "hosts" list also from infrastructure_config.py
if len(conf_switches) > 0:
- dpid=infrastructure_launch.launch(conf_switches, hosts, args.controller)
- try :
- if args.policy:
- for switch in switches:
- # This leverages a global from odl_gbp called "nodes", which appends "data" from this for loop
- odl_gbp.get_node_config(switch['dpid'], switch['tunnelIp'])
- #This also uses the global "nodes" from odl_gbp
- odl_gbp.register_nodes(args.controller)
-
- # TENANT, L3CTX, L2BD are imported from config.py
- # get_tenant looks for the TENANT UUID in a global tenant dictionary in odl_gbp.
- # If TENANT doesn't already exist in that dict. then a bunch of 'default' tenant data is defined, inluding
- # subjects and classifiers (at writing specific to HTTP source/dest and ICMP)
- tenant = odl_gbp.get_tenant(TENANT)
-
- # Layer3 context and Layer BridgeDomain are SET into the tenant{} structure in odl_gbp
- # TODO: (maybe call these set???)
- odl_gbp.get_l3c(TENANT, L3CTX)
- odl_gbp.get_bd(TENANT, L2BD, L3CTX)
-
- # subnets and fds (flood domains)
- subnets = {}
- fds = {}
- # hosts comes from config.py, which contains target switch, IP Address, MAC address, tenant and EPG
- for host in hosts:
- print host
- if args.local and host['switch'] != args.local:
- continue
- nw = ipaddr.IPv4Network(host['ip'])
- snet = "{}/{}".format(nw.network + 1, nw.prefixlen)
- router = "{}".format(nw.network + 1)
-
- if snet not in subnets:
- snid = str(uuid.uuid4())
- fdid = str(uuid.uuid4())
- # Sets flood domain where parent is L2BD from config.py
- fds[fdid] = odl_gbp.get_fd(TENANT, fdid, L2BD)
-
- # sets subnet from tenant, which also includes the flood domain
- subnets[snet] = odl_gbp.get_subnet(TENANT, snid, fdid, snet, router)
-
- # Sets the "network-domain" in global endpointGroups dict in odl_gbp.py
- odl_gbp.get_epg(TENANT, host['endpointGroup'])["network-domain"] = snid
-
- # Creates EP information and appends to endpoint list, a global
- odl_gbp.get_ep(TENANT,
- host['endpointGroup'],
- L3CTX,
- re.sub(r'/\d+$', '', host['ip']),
- L2BD,
- host['mac'],
- dpid,
- host['port'])
-
- # contracts is a global list from config.py.
- # get_contract creates the specific subject, classifiers, rules etc for the contract
- # and appends this to the global tenant list.
- for contract in contracts:
- odl_gbp.get_contract(TENANT,
- contract['provider'], contract['consumer'],
- contract['id'])
-
- # POST to the controller to register tenants
- if args.policy:
- odl_gbp.register_tenants(args.controller)
-
- # POST to controller to register EPS
- # TODO: Should this be done on a per Tenant basis
- odl_gbp.register_eps(args.controller)
-
- if net is not None:
- mininet.cli.CLI(net)
- finally:
- if net is not None:
- net.stop()
+ dpid=infrastructure_launch.launch(conf_switches, infrastructure_config.hosts, args.controller)
+
+ if args.policy:
+ for switch in infrastructure_config.switches:
+ # This leverages a global from odl_gbp called "nodes", which appends "data" from this for loop
+ odl_gbp.get_node_config(switch['dpid'], switch['tunnelIp'])
+ #This also uses the global "nodes" from odl_gbp
+ odl_gbp.register_nodes(args.controller)
+
+ #Only one tenant supported today
+ tenant = policy_config.tenants[0]
+ tenant = odl_gbp.initialize_tenant(tenant)
+    if len(tenant['l3-context']) == 0:
+ print "Setting L3 context"
+ odl_gbp.get_l3c(tenant['id'], policy_config.L3CTX)
+ l3context=tenant['l3-context'][0]['id']
+ if len(tenant['l2-bridge-domain']) == 0:
+ print "Setting L2 Bridge domain"
+ odl_gbp.get_bd(tenant['id'], policy_config.L2BD, tenant['l3-context'][0]['id'])
+ l2bridgeDomain=tenant['l2-bridge-domain'][0]['id']
+ # subnets and fds (flood domains)
+ subnets = {}
+ fds = {}
+ # hosts comes from infrastructure_config.py, which contains target switch, IP Address, MAC address, tenant and EPG
+ for host in infrastructure_config.hosts:
+ if args.local and host['switch'] != args.local:
+ continue
+ nw = ipaddr.IPv4Network(host['ip'])
+ snet = "{}/{}".format(nw.network + 1, nw.prefixlen)
+ router = "{}".format(nw.network + 1)
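+        # e.g. host ip 10.0.35.2/24 -> snet "10.0.35.1/24", router "10.0.35.1"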
+
+ if snet not in subnets:
+ snid = str(uuid.uuid4())
+ fdid = str(uuid.uuid4())
+            # Sets flood domain whose parent is the L2BD from policy_config.py
+ fds[fdid] = odl_gbp.get_fd(tenant['id'], fdid, l2bridgeDomain)
+
+ # sets subnet from tenant, which also includes the flood domain
+ subnets[snet] = odl_gbp.get_subnet(tenant['id'], snid, fdid, snet, router)
+ # Sets the "network-domain" in global endpointGroups dict in odl_gbp.py
+
+ for endpointGroup in policy_config.endpointGroups:
+ if host['endpointGroup'] == endpointGroup['name']:
+ groupId=endpointGroup['id']
+ odl_gbp.get_epg(tenant['id'], groupId)["network-domain"] = snid
+
+ # Creates EP information and appends to endpoint list, a global
+ odl_gbp.get_ep(tenant['id'],
+ groupId,
+ l3context,
+ re.sub(r'/\d+$', '', host['ip']),
+ l2bridgeDomain,
+ host['mac'],
+ dpid,
+ host['port'])
+
+    # Resolve contract names to EPG IDs and add each contract to the policy
+    for contract in policy_config.contracts:
+        # Reset per contract so EPG IDs do not accumulate across contracts
+        contractConsumerEpgIDs=[]
+        contractProviderEpgIDs=[]
+ for endpointGroup in policy_config.endpointGroups:
+ if contract['name'] in endpointGroup['consumesContracts']:
+ contractConsumerEpgIDs.append(endpointGroup['id'])
+ if contract['name'] in endpointGroup['providesContracts']:
+ contractProviderEpgIDs.append(endpointGroup['id'])
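+        # With the sample policy_config, 'pingall+web' resolves to the 'webserver'
+        # EPG as provider and the 'client' EPG as consumer.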
+
+ odl_gbp.get_contract(tenant['id'],
+ contractProviderEpgIDs,
+ contractConsumerEpgIDs,
+ contract)
+
+ # POST to the controller to register tenants
+ if args.policy:
+ odl_gbp.register_tenants(args.controller)
+
+ # POST to controller to register EPS
+ odl_gbp.register_eps(args.controller)
+
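+# A quick post-run sanity check of the registered policy (URL is an assumption
+# based on the Helium-era restconf layout; adjust host/port for your controller):
+#   curl -u admin:admin http://<controller>:8080/restconf/config/policy:tenants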