--- /dev/null
+
+
# --- Policy object UUIDs (must match what the controller is given) ---

# Tenant that owns every policy object below.
TENANT="f5c7d344-d1c7-4208-8531-2c2693657e12"
# Layer-3 context (routing domain) for the tenant.
L3CTX="f2311f52-890f-4095-8b85-485ec8b92b3c"
# Layer-2 bridge domain, parented to the L3 context.
L2BD="70aeb9ea-4ca1-4fb9-9780-22b04b84a0d6"

# Layer-2 flood domains and the two endpoint groups.
# NOTE(review): L2FD1/L2FD2 are not referenced by the launch scripts in
# this change (flood domains are generated per subnet) -- confirm they
# are still needed.
L2FD1="252fbac6-bb6e-4d16-808d-6f56d20e5cca"
EG1="1eaf9a67-a171-42a8-9282-71cf702f61dd"
L2FD2="cb5249bb-e896-45be-899d-4cdd9354b58e"
EG2="e593f05d-96be-47ad-acd5-ba81465680d5"

# Contract applied between EG1 (consumer) and EG2 (provider).
CONTRACT="22282cca-9a13-4d0c-a67e-a933ebb0b0ae"

# Config for switches; tunnelIp is the local IP address used as the VXLAN
# tunnel source for that switch.
switches = [{'name': 's1',
             'tunnelIp': '192.168.56.30',
             'dpid': '1'},
            {'name': 's2',
             'tunnelIp': '192.168.56.32',
             'dpid': '2'}]

# Docker image used for a host unless the host entry carries its own
# 'container_image' key.
defaultContainerImage='alagalah/odlpoc_ovs230'

# Endpoints: each entry becomes a container attached to 'switch' and is
# registered with the controller under 'tenant' / 'endpointGroup'.
# Naming convention: hXX_Y lives on subnet 10.0.XX.0/24 with host part Y.
hosts = [{'name': 'h35_2',
          'mac': '00:00:00:00:35:02',
          'ip': '10.0.35.2/24',
          'switch': 's1',
          'tenant': TENANT,
          'endpointGroup': EG1},
         {'name': 'h35_3',
          'ip': '10.0.35.3/24',
          'mac': '00:00:00:00:35:03',
          'switch': 's1',
          'tenant': TENANT,
          'endpointGroup': EG1},
         {'name': 'h35_4',
          'ip': '10.0.35.4/24',
          'mac': '00:00:00:00:35:04',
          'switch': 's2',
          'tenant': TENANT,
          'endpointGroup': EG1},
         {'name': 'h35_5',
          'ip': '10.0.35.5/24',
          'mac': '00:00:00:00:35:05',
          'switch': 's2',
          'tenant': TENANT,
          'endpointGroup': EG1},
         {'name': 'h36_2',
          'ip': '10.0.36.2/24',
          'mac': '00:00:00:00:36:02',
          'switch': 's1',
          'tenant': TENANT,
          'endpointGroup': EG2},
         {'name': 'h36_3',
          'ip': '10.0.36.3/24',
          'mac': '00:00:00:00:36:03',
          'switch': 's1',
          'tenant': TENANT,
          'endpointGroup': EG2},
         {'name': 'h36_4',
          'ip': '10.0.36.4/24',
          'mac': '00:00:00:00:36:04',
          'switch': 's2',
          'tenant': TENANT,
          'endpointGroup': EG2},
         {'name': 'h36_5',
          'ip': '10.0.36.5/24',
          'mac': '00:00:00:00:36:05',
          'switch': 's2',
          'tenant': TENANT,
          'endpointGroup': EG2}]

# Contracts to instantiate (see odl_gbp.get_contract): EG1 consumes the
# HTTP/ICMP contract that EG2 provides.
contracts = [{'consumer': EG1,
              'provider': EG2,
              'id': CONTRACT}]
--- /dev/null
#!/bin/bash
# Stop (1s grace period) and remove every container on this host.
# Guard against an empty container list: "docker stop"/"docker rm" exit
# with an error when given no arguments, so skip the calls in that case.
CONTAINERS=$(docker ps -a -q)
if [ -n "$CONTAINERS" ]; then
    docker stop -t=1 $CONTAINERS
    docker rm $CONTAINERS
fi
+
--- /dev/null
+#===============================================================================
+# Containers are created from the config below. Basic structure is:
+# Container
# - basic container information such as its IP used to talk to the host etc
+# - host ports: these are EPs in the policy_config.py file. Processes such as
+# a simple socket server, HTTPD, database etc can be run in the container.
+# - tunnel ports are what make up the topology, currently the remote_ip points
+# to the docker container IP_address
+#===============================================================================
+
# Controller the containers' switches register with.
OPENDAYLIGHT_CONTROLLER_IP="192.168.56.1"
OPENDAYLIGHT_CONTROLLER_PORT=6653

#TODO: modify script to leverage pipework for multiple host bridges/remote systems, as well as making 172.17.0.0/16 configurable
#TODO: change remote IP to point to another container by container "name" and resolve its IP address
#TODO: Write a GUI that can instantiate these values.
#TODO: Change script to automatically pull image from docker repo.

# One entry per container/switch (structure described in the header
# comment above): host_ports are the EPs, tunnels define the topology.
containers = [{
    "name" : "s1", #synonymous with switch name
    "image" : "alagalah/odlpoc_ovs230",
    "ip_address" : "172.17.0.101", # IP address of the switch; relies on docker default of 172.17.0.0/16.
    "ip_mask" : "255.255.0.0",
    "host_interface_mac" : "00:00:00:fa:bb:01",
    "dpid" : "0000000000000001", # Must be 16 "bytes" long
    "host_ports" : [
        {"port_name" : "p1", # synonymous with EP
         "port_ip" : "10.1.1.11", # synonymous with EP
         "port_ip_mask" :"255.255.0.0",
         "port_mac" : "de:ad:10:01:01:11",
         "vlan" : None}
#        {"port_name" : "p2", # synonymous with EP
#         "port_ip" : "30.1.1.11", # synonymous with EP
#         "port_ip_mask" :"255.255.0.0",
#         "port_mac" : "de:ad:30:01:01:11",
#         "vlan" : None}
    ],
    "tunnels" : [
        {"type" :"vxlan", #only vxlan supported at moment, may look into ivxlan and GRE later
         "port_name" : "s1_vxlan1", #"vxlan1" is just a string and can be anything you like
         "key" :"flow", #allows us to overload VXLAN tunnel using VNIs if needed
         "openflow_port" : "10", #creates an OF port in datapath to map flows to tunnel
         "remote_ip" : "172.17.0.103" #Optional... TODO: fix it.
        }
    ]
    },
    {"name" : "s2",
     "image" : "alagalah/odlpoc_ovs230",
     "ip_address" : "172.17.0.102",
     "ip_mask" : "255.255.0.0",
     "host_interface_mac" : "00:00:00:fa:bb:02",
     "dpid" : "0000000000000002",
     "host_ports" : [
         {"port_name" : "p1",
          "port_ip" : "20.1.1.11",
          "port_ip_mask" :"255.255.0.0",
          "port_mac" : "de:ad:20:01:01:11",
          "vlan" : None}
#        {"port_name" : "p2",
#         "port_ip" : "20.1.1.12",
#         "port_ip_mask" :"255.255.0.0",
#         "port_mac" : "de:ad:20:01:01:12",
#         "vlan" : None}
     ],
     "tunnels" : [
         {"type" :"vxlan",
          "port_name" : "s2_vxlan1",
          "key" :"flow",
          "openflow_port" : "10",
          "remote_ip" : "172.17.0.101"
         }
     ]
    }] #end containers
--- /dev/null
+import re
+import time
+import sys
+import ipaddr
+from subprocess import call
+from subprocess import check_output
+from config import *
+
def addController(sw, ip):
    """Point OVS bridge *sw* at the OpenFlow controller listening on *ip*:6653."""
    target = 'tcp:%s:6653' % ip
    call(['ovs-vsctl', 'set-controller', sw, target])
+
+def addSwitch(name, dpid=None):
+ call(['ovs-vsctl', 'add-br', name]) #Add bridge
+ if dpid:
+ if len(dpid) < 16: #DPID must be 16-bytes in later versions of OVS
+ filler='0000000000000000'
+ dpid=filler[:len(filler)-len(dpid)]+dpid
+ elif len(dpid) > 16:
+ print 'DPID: %s is too long' % dpid
+ sys.exit(3)
+ call(['ovs-vsctl','set','bridge', name,'other-config:datapath-id=%s'%dpid])
+# return switch
+
def addHost(net, switch, name, ip, mac):
    # NOTE(review): this function appears vestigial -- launchContainer()
    # requires (host, containerImage) arguments, so this call would raise
    # TypeError, and none of the parameters or the result are used.
    # launch() below creates containers directly; confirm this can be
    # removed.
    containerID=launchContainer()
+
def setOFVersion(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
    """Advertise the given OpenFlow protocol versions on bridge *sw*."""
    protocols = 'protocols={}'.format(version)
    call(['ovs-vsctl', 'set', 'bridge', sw, protocols])
+
def addTunnel(sw, sourceIp=None):
    """Add a flow-based VXLAN tunnel port named '<sw>_vxlan0' to bridge *sw*.

    remote_ip and key are both set to 'flow' so the installed flows choose
    the tunnel destination and VNI; *sourceIp*, when given, pins the local
    tunnel endpoint address.
    """
    port = '{}_vxlan0'.format(sw)
    args = ['ovs-vsctl', 'add-port', sw, port,
            '--', 'set', 'Interface', port,
            'type=vxlan', 'options:remote_ip=flow', 'options:key=flow']
    if sourceIp is not None:
        args.append('options:source_ip={}'.format(sourceIp))
    call(args)
+
def launchContainer(host, containerImage):
    """Start a detached, privileged container for *host* and return its id.

    Equivalent shell command:
      docker run -d --net=none --name={name} -h {name} -t -i
                 --privileged=True {image} /bin/bash
    """
    containerID = check_output(['docker', 'run', '-d', '--net=none',
                                '--name=%s' % host['name'], '-h', host['name'],
                                '-t', '-i', '--privileged=True',
                                containerImage, '/bin/bash'])
    # docker prints the container id followed by a newline; strip the
    # newline explicitly instead of slicing off the last byte blindly.
    return containerID.rstrip('\n')
+
def connectContainerToSwitch(sw, host, containerID, of_port):
    """Wire container *containerID* into bridge *sw* via ./ovswork.sh.

    Derives the broadcast address and first-host gateway from the host's
    CIDR 'ip', then delegates the veth/namespace plumbing to ovswork.sh.
    An optional 'vlan' key on *host* is passed through as the VLAN tag.
    """
    hostIP = host['ip']
    mac = host['mac']
    nw = ipaddr.IPv4Network(hostIP)
    broadcast = "{}".format(nw.broadcast)
    router = "{}".format(nw.network + 1)  # convention: gateway is the .1 address
    cmd = ['./ovswork.sh', sw, containerID, hostIP, broadcast, router, mac,
           of_port]
    # dict.has_key() is deprecated (and gone in Py3); 'in' works everywhere.
    if 'vlan' in host:
        cmd.append(host['vlan'])
    call(cmd)
+
+
def launch(switches, hosts, contIP='127.0.0.1'):
    """Build the distributed topology: one OVS bridge plus containers per switch.

    For each switch: create the bridge, add its VXLAN tunnel, then launch
    and attach a container for every host assigned to that switch.
    OpenFlow ports are allocated sequentially per switch, and each host
    dict gains a 'port' key ('openflow<dpid>:<ofport>') for later endpoint
    registration.

    Returns the dpid of the last switch configured (None when *switches*
    is empty) -- callers pass it to the endpoint registration.
    """
    dpid = None  # initialized so an empty switch list returns None, not NameError
    for sw in switches:
        dpid = sw['dpid']
        addSwitch(sw['name'], sw['dpid'])
        addTunnel(sw['name'], sw['tunnelIp'])

        ports = 0
        for host in hosts:
            if host['switch'] == sw['name']:
                ports += 1
                containerImage = defaultContainerImage  # from config
                if 'container_image' in host:  # per-host override from config
                    containerImage = host['container_image']
                containerID = launchContainer(host, containerImage)
                connectContainerToSwitch(sw['name'], host, containerID,
                                         str(ports))
                # TODO: derive the connector id from OVS instead of assuming
                # ofport == attach order (known hack).
                host['port'] = 'openflow%s:%s' % (str(sw['dpid']), str(ports))

        # Workaround for a Helium-era openflowplugin bug: switching the OVS
        # protocol version from 1.0 to 1.3 while connected made the
        # controller miss some ports, so set the version *before*
        # connecting the switch to the controller.
        setOFVersion(sw['name'])
        addController(sw['name'], contIP)

    return dpid
+# '''
+# Created on Oct 16, 2014
+#
+# @author: alagalah
+# '''
+#
+# import infrastructure_config
+# import os
+# import sys
+# import argparse
+# import io
+# import subprocess
+# import ipaddr
+#
+#
+# if __name__ == '__main__':
+# '''
+# Usage:
+# 1. Assumes well formed infrastructure_config.py file to build containers etc
+# 2. If used with --dockerfiles_only True, only the startup scripts and docker files will
+# be created, without building an image or launching the containers.
+# 3. If used with --directory {directory} creates all files in directory
+# '''
+# DEBUG=False
+#
+# #Check for parameters to see if --dockerfiles_only True is passed
+# parser = argparse.ArgumentParser()
+# parser.add_argument('--dockerfiles_only', help='Does not launch containers if set to True',default=False)
+# parser.add_argument('--directory', help='Base directory to create dockerfiles and shellscripts',default='.')
+# args = parser.parse_args()
+#
+# LAUNCH=True
+# if (args.dockerfiles_only):
+# LAUNCH=False
+#
+# if (args.directory):
+# os.chdir(args.directory)
+# print 'Working directory: ',os.getcwd()
+#
+# WORKING_DIRECTORY=os.getcwd()
+#
+# #===========================================================================
+# # For each container, the following steps are executed:
+# # 1. Create a shell script locally that will run inside each container.
+# # It is called 'startup-{switchname}.sh that:
+# # - Stops, cleans, starts OVS
+# # - Executes ovs-vsctl commands to create ports and tunnels
+# # - Assigns IP addresses to container interfaces
+# # - Fires off a bash script
+# # 2. Create a Dockerfile that:
+# # - Leverages the base image from infrastructure_config (FROM:)
+# # - ADD startup-{switchname}.sh which copies file from local to container
+# # - RUN chmod +x startup-{switchname}.sh
+# # - CMD ./startup-{switchname}.sh
+# # 3. Build a docker image called {switchname} using the Dockerfile-{switchname}.
+# # 4. Run the docker image with flags '-i -t -d --privileged=True'
+# #
+# #===========================================================================
+# docker_commands=[]
+# for container in infrastructure_config.containers:
+# docker_commands_container=[]
+# DOCKERIMAGE_NAME=str(container['name']).lower()
+# SCRIPT_NAME='startup-'+DOCKERIMAGE_NAME+'.sh'
+# #DOCKERFILE_NAME='Dockerfile-'+DOCKERIMAGE_NAME
+# DOCKERFILE_NAME='Dockerfile'
+#
+# if DEBUG: print "DEBUG: Processing container ",DOCKERIMAGE_NAME, SCRIPT_NAME, DOCKERIMAGE_NAME
+#
+# # Create shell script file to execute following
+# shell_lines=[]
+#
+# shell_lines.append(" ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach")
+# shell_lines.append("ovs-vswitchd --pidfile --detach")
+# # shell_lines.append('service openvswitch-switch stop') #Kill OVS process if running (shouldn't be)
+# # shell_lines.append('rm /etc/openvswitch/conf.db') #Remove any config hanging around
+# # shell_lines.append('service openvswitch-switch start') #Restart all fresh and squeaky clean
+# shell_lines.append('/bin/sh export PS1="'+DOCKERIMAGE_NAME+'"') #Set the prompt to the switchname for usability
+# shell_lines.append('ovs-vsctl add-br '+DOCKERIMAGE_NAME) #Add the bridge
+# shell_lines.append('ovs-vsctl set bridge %s other-config:datapath-id=%s' % (DOCKERIMAGE_NAME,container["dpid"])) #Set DPID
+# # shell_lines.append('ovs-vsctl set bridge %s protocols="OpenFlow13,OpenFlow10"' % DOCKERIMAGE_NAME) #Set OF13
+# # shell_lines.append('ovs-vsctl set bridge %s datapath_type=netdev '+DOCKERIMAGE_NAME) #Alagalah - experimental
+# shell_lines.append('ovs-vsctl set bridge %s protocols="OpenFlow13"' % DOCKERIMAGE_NAME) #Set OF13
+#
+#
+# # Adding internal ports
+# for hostport in container["host_ports"]:
+# if DEBUG: print "DEBUG: Processing port: ",hostport["port_name"],hostport.keys()
+#
+#
+# port_name=str(hostport["port_name"])
+# port_ip=str(hostport["port_ip"])
+# port_ip_mask=str(hostport["port_ip_mask"])
+# port_mac=str(hostport["port_mac"])
+#
+# shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=internal' % (DOCKERIMAGE_NAME, port_name, port_name)) #Add hostport to switch as internal hostport
+# shell_lines.append('ifconfig '+port_name+' down') #Down it
+# shell_lines.append('ifconfig '+port_name+' hw ether '+port_mac) #Set the MAC address
+# shell_lines.append('ifconfig '+port_name+' '+port_ip+' netmask '+port_ip_mask) #Set the IP address
+# shell_lines.append('ifconfig '+port_name+' up') #Up it
+#
+# #Reset the docker default interface address which is Eth0
+# #TODO: Perhaps look to add pipeworks functionality of a bridge/hostport just for POC/Demos
+# shell_lines.append('ifconfig eth0 down') #Down it
+# shell_lines.append('ifconfig eth0 hw ether '+str(container["host_interface_mac"])) #Set the MAC address
+# shell_lines.append('ifconfig eth0 ' + str(container["ip_address"])+' netmask '+str(container["ip_mask"])) #Set the IP address
+# shell_lines.append('ifconfig eth0 up') #Up it
+#
+#
+# # Adding tunnel ports
+# for tunnel in container["tunnels"]:
+# tunnel_type=tunnel["type"] #only supported at moment, may look into ivxlan and GRE later
+# port_name=tunnel["port_name"]
+# key=tunnel["key"] #allows us to overload VXLAN tunnel using VNIs if needed if using flow
+# remote_ip=tunnel["remote_ip"]
+# openflow_port=tunnel["openflow_port"] #creates an OF port in datapath to map flows to tunnel
+#
+# # #Set using remote tunnel destination
+# # shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=%s option:remote_ip=%s option:key=%s ofport_request=%s' %
+# # (DOCKERIMAGE_NAME,port_name,port_name,tunnel_type,remote_ip,key,openflow_port))
+#
# # Setting source tunnel only, no OFPORT REQUEST
+# shell_lines.append('ovs-vsctl add-port %s %s -- set interface %s type=%s option:remote_ip=flow option:key=%s option:source_ip=%s' %
+# (DOCKERIMAGE_NAME,port_name,port_name,tunnel_type,key,container["ip_address"]))
+#
+# #####################
+# # WARNING! THIS IS A BIT HACKY! UNTIL CONFIG CHANGES TO USE PIPEWORKS THIS SHOULD BE OK --- REALLY RELIES ON SINGLE PORT!!! OR LAST PORT SET!
+# shell_lines.append('ip route add %s via 172.17.42.1' % infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP)
+# nw = ipaddr.IPv4Network(port_ip+"/"+port_ip_mask)
+# snet = "{}/{}".format(nw.network + 1, nw.prefixlen)
+# router = "{}".format(nw.network + 1)
+# shell_lines.append('ip route default via %s' % router) #alagalah HEINOUS
+#
+# # END WARNING
+# #####################
+# # Want to register with the controller last.
+# shell_lines.append('ovs-vsctl set-controller %s tcp:%s:%s' % (DOCKERIMAGE_NAME,infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP,infrastructure_config.OPENDAYLIGHT_CONTROLLER_PORT)) #Set the CONTROLLER
+# # shell_lines.append('ovs-vsctl set-manager %s tcp:%s:%s' % (DOCKERIMAGE_NAME,infrastructure_config.OPENDAYLIGHT_CONTROLLER_IP,"6640")) #Set the CONTROLLER
+#
+# shell_lines.append('/bin/bash') #Leave a bash shell running else it dies... could also do "tail /dev/null"
+#
+# #Create the shell script
+# #These scripts only work correctly if docker has its own folder per container
+# directory=os.path.join(WORKING_DIRECTORY,DOCKERIMAGE_NAME)
+# if not os.path.exists(directory):
+# os.makedirs(directory)
+# os.chdir(directory)
+# with open(SCRIPT_NAME, 'w') as f:
+# for s in shell_lines:
+# f.write(s + '\n')
+# print "Created script ",SCRIPT_NAME
+#
+# #===============================================================================
+# # Step 1 COMPLETE, NOW FOR Step 2 - Creating the Docker file
+# #===============================================================================
+#
+# dockerfile_lines=[]
+#
+# dockerfile_lines.append("FROM %s" % container["image"])
+# dockerfile_lines.append("ADD %s /" % SCRIPT_NAME)
+# dockerfile_lines.append("RUN chmod +x %s" % SCRIPT_NAME)
+# dockerfile_lines.append("CMD ./%s" % SCRIPT_NAME)
+#
+# #Create the Dockerfile
+# with open(DOCKERFILE_NAME, 'w') as f:
+# for s in dockerfile_lines:
+# f.write(s + '\n')
+# print "Created docker file ",DOCKERFILE_NAME
+#
+# #=======================================================================
+# # Steps 4 & 5 create the docker CLI commands to BUILD (using the Dockerfile)
+# # and RUN the image, which automatically runs the startup-{switch}.sh script
+# #=======================================================================
+# docker_commands_container.append(os.getcwd()) # This is how we know what directory to go to
+# docker_commands_container.append('sudo docker build -t %s .' % DOCKERIMAGE_NAME)
+# docker_commands_container.append('sudo docker run -t -i -d --privileged=true --name=%s %s' % (DOCKERIMAGE_NAME,DOCKERIMAGE_NAME))
+# docker_commands.append(docker_commands_container)
+#
+# # Only execute docker launch commands if --dockerfiles_only is NOT set to True
+# if LAUNCH:
+# for command in docker_commands:
+# print command
+# os.chdir(command[0])
+# print "Changed directory to ",os.getcwd()
+# os.system(command[1])
+# os.system(command[2])
--- /dev/null
+
+from mininet.topo import Topo
+from mininet.node import RemoteController
+from mininet.net import Mininet
+from mininet.util import dumpNodeConnections
+from mininet.log import setLogLevel
+from mininet.node import Node
+
+import re
+import time
+from subprocess import call
+from subprocess import check_output
+
def addController(sw, ip):
    """Attach switch *sw* to the OpenFlow controller at *ip*, port 6653."""
    call(['ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip])
+
def addSwitch(net, name, dpid=None):
    """Add switch *name* (optionally with a fixed dpid) to *net* and return it."""
    return net.addSwitch(name, dpid=dpid)
+
def addHost(net, switch, name, ip, mac):
    """Create host *name* with the given ip/mac and link it to *switch*."""
    newHost = net.addHost(name, ip=ip, mac=mac)
    net.addLink(newHost, switch)
+
def setOFVersion(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
    """Restrict bridge *sw* to the given OpenFlow protocol versions."""
    setting = 'protocols={}'.format(version)
    call(['ovs-vsctl', 'set', 'bridge', sw, setting])
+
def addTunnel(sw, sourceIp=None):
    """Add a flow-based VXLAN tunnel port '<sw>_vxlan0' to bridge *sw*.

    Tunnel destination and VNI are left to the flows (remote_ip/key =
    'flow'); *sourceIp*, when given, fixes the local tunnel endpoint.
    """
    port = '{}_vxlan0'.format(sw)
    vsctl = ['ovs-vsctl', 'add-port', sw, port,
             '--', 'set', 'Interface', port,
             'type=vxlan', 'options:remote_ip=flow', 'options:key=flow']
    if sourceIp is not None:
        vsctl.append('options:source_ip={}'.format(sourceIp))
    call(vsctl)
+
+#ovs-ofctl dump-ports-desc s1 -OOpenFlow13
+
def startMininet(switches, hosts, contIP='127.0.0.1'):
    """Build and start a mininet of *switches*/*hosts* wired to controller *contIP*.

    Hosts are attached in config order, so each host dict gains a 'port'
    key holding its OpenFlow port number on its switch.  Returns the
    started Mininet instance; on any failure the net is stopped and the
    original exception re-raised.
    """
    setLogLevel('info')

    # No mininet-managed controller: switches are pointed at ODL below.
    net = Mininet(controller=None,
                  autoSetMacs=True,
                  listenPort=6634)

    swobjs = {}
    swports = {}

    for sw in switches:
        swobjs[sw['name']] = addSwitch(net, sw['name'])
        swports[sw['name']] = 0
    for host in hosts:
        if host['switch'] not in swobjs:
            continue
        sw = swobjs[host['switch']]
        swports[host['switch']] += 1
        port = swports[host['switch']]
        addHost(net, sw, host['name'], host['ip'], host['mac'])
        host['port'] = port

    try:
        net.start()
        for sw in switches:
            addTunnel(sw['name'], sw['tunnelIp'])

        for host in net.hosts:
            # Default gateway convention: the .1 address of the host's subnet.
            gw = re.sub(r'.\d+$', ".1", host.IP())
            host.cmd('route add default gw {}'.format(gw))

        # ODL is very fragile so let's give it some time
        time.sleep(1)

        # Workaround for a bug encountered during the Helium release:
        # changing the vSwitch from OF1.0 to OF1.3 while connected to the
        # controller exposed an openflowplugin bug that made the controller
        # miss some ports, so set the version *before* connecting.
        for sw in switches:
            setOFVersion(sw['name'])
            addController(sw['name'], contIP)

        return net
    except Exception:
        # "except Exception, e" is Py2-only syntax, and "raise e" discarded
        # the original traceback; a bare raise preserves it.
        net.stop()
        raise
--- /dev/null
+
+import requests,json
+from requests.auth import HTTPBasicAuth
+
# Credentials and RESTCONF endpoints on the controller; '%s' is the
# controller host/IP.
USERNAME='admin'
PASSWORD='admin'
REGISTER_EP_URL="http://%s:8181/restconf/operations/endpoint:register-endpoint"
REGISTER_TENANTS_URL="http://%s:8181/restconf/config/policy:tenants"
REGISTER_NODES_URL="http://%s:8181/restconf/config/opendaylight-inventory:nodes"
# Operational (as opposed to config) view of the inventory.  Previously
# missing, which made get_operational_nodes_data() raise NameError.
OPERATIONAL_NODES_URL="http://%s:8181/restconf/operational/opendaylight-inventory:nodes"
+
# Cache of endpoint-group dicts keyed by "tenant|epg".
endpointGroups = {}

def get_epg(tenantId, epgId):
    """Return the endpoint-group dict for *epgId* under *tenantId*.

    Created (with empty consumer/provider selectors), appended to the
    tenant's endpoint-group list, and cached on first use.
    """
    cacheKey = "{}|{}".format(tenantId, epgId)
    if cacheKey not in endpointGroups:
        group = {
            "id": epgId,
            "consumer-named-selector": [],
            "provider-named-selector": []
        }
        get_tenant(tenantId)["endpoint-group"].append(group)
        endpointGroups[cacheKey] = group
    return endpointGroups[cacheKey]
+
# Cache of tenant dicts keyed by tenant id.
tenants = {}

# This is where some of the policy is set: subjects and classifiers.
def get_tenant(tenantId):
    """Return the tenant dict for *tenantId*, creating it on first use.

    A new tenant starts with empty policy containers plus the canned
    classifier instances (HTTP on TCP/80, src and dest, plus ICMP) that
    contracts later reference by name.
    """
    if tenantId in tenants:
        return tenants[tenantId]
    httpDest = {"name": "http-dest",
                "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
                "parameter-value": [
                    {"name": "type",
                     "string-value": "TCP"},
                    {"name": "destport",
                     "int-value": "80"}
                ]}
    httpSrc = {"name": "http-src",
               "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
               "parameter-value": [
                   {"name": "type",
                    "string-value": "TCP"},
                   {"name": "sourceport",
                    "int-value": "80"}
               ]}
    icmp = {"name": "icmp",
            "classifier-definition-id": "79c6fdb2-1e1a-4832-af57-c65baf5c2335",
            "parameter-value": [
                {"name": "proto",
                 "int-value": "1"}
            ]}
    data = {
        "id": tenantId,
        "l3-context": [],
        "l2-bridge-domain": [],
        "l2-flood-domain": [],
        "subnet": [],
        "endpoint-group": [],
        "contract": [],
        "subject-feature-instances": {
            "classifier-instance": [httpDest, httpSrc, icmp]
        }
    }
    tenants[tenantId] = data
    return data
+
# Subnet cache keyed by "tenant|subnet"; consulted by get_subnet().
subnets = {}

def get_fd(tenantId, fdId, parent):
    """Create l2-flood-domain *fdId* under *parent* and attach it to the tenant."""
    fd = {"id": fdId,
          "parent": parent}
    get_tenant(tenantId)["l2-flood-domain"].append(fd)
    return fd
+
def get_bd(tenantId, bdId, parent):
    """Create l2-bridge-domain *bdId* under *parent* (an l3-context) and
    attach it to the tenant."""
    bd = {"id": bdId,
          "parent": parent}
    get_tenant(tenantId)["l2-bridge-domain"].append(bd)
    return bd
+
def get_l3c(tenantId, l3cId):
    """Create l3-context *l3cId* and attach it to the tenant."""
    l3c = {"id": l3cId}
    get_tenant(tenantId)["l3-context"].append(l3c)
    return l3c
+
def get_subnet(tenantId, subnetId, parent, prefix, router):
    """Return (creating on first use) subnet *subnetId* for *tenantId*.

    *parent* is the flood domain id, *prefix* the CIDR, *router* the
    virtual-router IP.  Bug fix: the original checked the *subnets* cache
    but never stored into it, so repeated calls appended duplicate subnet
    entries to the tenant; the created dict is now cached.
    """
    k = "{}|{}".format(tenantId, subnetId)
    if k in subnets:
        return subnets[k]
    tenant = get_tenant(tenantId)
    data = {"id": subnetId,
            "parent": parent,
            "ip-prefix": prefix,
            "virtual-router-ip": router}
    tenant["subnet"].append(data)
    subnets[k] = data
    return data
+
# Flat list of endpoint dicts accumulated for register_eps().
endpoints = []

def get_ep(tenantId, groupId, l3ctx, ip, l2ctx, mac, sw, port):
    """Build an endpoint record and queue it for registration.

    Ensures the endpoint group exists under the tenant, then records the
    endpoint's L2/L3 addresses and its OpenFlow attachment point.
    """
    get_epg(tenantId, groupId)  # make sure the group is registered
    ep = {"tenant": tenantId,
          "endpoint-group": groupId,
          "l2-context": l2ctx,
          "mac-address": mac,
          "l3-address": [{"l3-context": l3ctx,
                          "ip-address": ip}],
          "ofoverlay:node-id": "openflow:{}".format(sw),
          "ofoverlay:node-connector-id": "{}".format(port)}
    endpoints.append(ep)
    return ep
+
# Per-switch ofoverlay node configs accumulated for register_nodes().
nodes = []

def get_node_config(sw, tun_ip):
    """Record (and return) the ofoverlay tunnel-ip config for switch *sw*."""
    node = {"id": "openflow:{}".format(sw),
            "ofoverlay:tunnel-ip": tun_ip}
    nodes.append(node)
    return node
+
# This is where specifics of the contract are defined. Note: Classifiers are SET in the get_tenant procedure.
def get_contract(tenantId, pgroupId, cgroupId, contractId):
    """Create contract *contractId* and register it with the tenant.

    Wires *pgroupId* as provider and *cgroupId* as consumer by appending
    matching named selectors to each group.  The subjects reference the
    classifier instances ("http-dest", "http-src", "icmp") that
    get_tenant() seeds on the tenant, allowing HTTP (dest-port in,
    source-port out) and ICMP.
    """
    tenant = get_tenant(tenantId)
    pgroup = get_epg(tenantId, pgroupId)
    cgroup = get_epg(tenantId, cgroupId)
    data = {
        "id": contractId,
        "subject": [{"name": "allow-http-subject",
                     "rule": [
                         {"name": "allow-http-rule",
                          "classifier-ref": [
                              {"name": "http-dest",
                               "direction": "in"},
                              {"name": "http-src",
                               "direction": "out"}
                          ]}
                     ]},
                    {"name": "allow-icmp-subject",
                     "rule": [
                         {"name": "allow-icmp-rule",
                          "classifier-ref": [
                              {"name": "icmp"}
                          ]}
                     ]}],
        "clause": [{"name": "allow-http-clause",
                    "subject-refs": ["allow-http-subject",
                                     "allow-icmp-subject"]}]
    }
    tenant["contract"].append(data)
    # Consumer/provider selectors share a name derived from the
    # (provider, consumer, contract) triple so they can be correlated.
    cgroup["consumer-named-selector"].append({
        "name": "{}-{}-{}".format(pgroupId, cgroupId, contractId),
        "contract": [contractId]
    })
    pgroup["provider-named-selector"].append({
        "name": "{}-{}-{}".format(pgroupId, cgroupId, contractId),
        "contract": [contractId]
    })

    return data
+
def _push(verb, url, data):
    """Shared body for post()/put(): log the request, send *data* as JSON
    with the yang.data+json content type, echo the response and raise for
    non-2xx status.  *verb* is requests.post or requests.put."""
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    print("%s %s" % (verb.__name__.upper(), url))
    print(json.dumps(data, indent=4, sort_keys=True))
    r = verb(url, data=json.dumps(data), headers=headers,
             auth=HTTPBasicAuth(USERNAME, PASSWORD))
    print(r.text)
    r.raise_for_status()

def post(url, data):
    """POST *data* (JSON-encoded) to *url*; raises for HTTP errors."""
    _push(requests.post, url, data)

def put(url, data):
    """PUT *data* (JSON-encoded) to *url*; raises for HTTP errors."""
    _push(requests.put, url, data)

def get(url):
    """GET *url* with basic auth and return the decoded JSON body."""
    print("GET %s" % url)
    r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
    r.raise_for_status()
    return r.json()
+
def get_operational_nodes_data(contHost):
    """Fetch the operational opendaylight-inventory view from *contHost*.

    Bug fix: OPERATIONAL_NODES_URL was never defined at module level, so
    this function raised NameError when called; the URL is built here
    explicitly (operational counterpart of REGISTER_NODES_URL).
    """
    url = "http://%s:8181/restconf/operational/opendaylight-inventory:nodes"
    return get(url % contHost)
+
def register_tenants(contHost):
    """PUT every cached tenant to the controller at *contHost*."""
    body = {"policy:tenants": {"tenant": tenants.values()}}
    put(REGISTER_TENANTS_URL % contHost, body)
+
def register_eps(contHost):
    """POST each queued endpoint to the controller at *contHost*."""
    for ep in endpoints:
        post(REGISTER_EP_URL % contHost, {"input": ep})
+
def register_nodes(contHost):
    """PUT the accumulated ofoverlay node configs to *contHost*."""
    body = {"opendaylight-inventory:nodes": {"node": nodes}}
    put(REGISTER_NODES_URL % contHost, body)
--- /dev/null
#!/bin/bash
# ovswork.sh: attach a running container to an OVS bridge.
# Creates a veth pair, plugs the host end into $BRIDGE at OpenFlow port
# $OF_PORT (optionally VLAN-tagged), moves the guest end into the
# container's network namespace as eth0 and configures IP/MAC/route.
set -e

BRIDGE=$1
GUESTNAME=$2
IPADDR=$3      # CIDR form, e.g. 10.0.35.2/24
BROADCAST=$4
GWADDR=$5
MAC=$6
OF_PORT=$7
VLANTAG=$8     # optional

[ "$IPADDR" ] || {
    echo "Syntax:"
    # Fixed: the old message described pipework's arguments, not this script's.
    echo "ovswork.sh <bridge> <guest> <ipaddr>/<subnet> <broadcast> <gateway> <mac> <of_port> [vlan tag]"
    exit 1
}

# Step 1: Find the guest (for now, we only support LXC containers)
while read dev mnt fstype options dump fsck
do
    [ "$fstype" != "cgroup" ] && continue
    echo $options | grep -qw devices || continue
    CGROUPMNT=$mnt
done < /proc/mounts

[ "$CGROUPMNT" ] || {
    echo "Could not locate cgroup mount point."
    exit 1
}

N=$(find "$CGROUPMNT" -name "$GUESTNAME*" | wc -l)
case "$N" in
    0)
        echo "Could not find any container matching $GUESTNAME."
        exit 1
        ;;
    1)
        true
        ;;
    *)
        echo "Found more than one container matching $GUESTNAME."
        exit 1
        ;;
esac

NSPID=$(head -n 1 $(find "$CGROUPMNT" -name "$GUESTNAME*" | head -n 1)/tasks)
[ "$NSPID" ] || {
    echo "Could not find a process inside container $GUESTNAME."
    exit 1
}

# Step 2: Prepare the working directory
mkdir -p /var/run/netns
rm -f /var/run/netns/$NSPID
ln -s /proc/$NSPID/ns/net /var/run/netns/$NSPID

# Step 3: Creating virtual interfaces
LOCAL_IFNAME=vethl$NSPID
GUEST_IFNAME=vethg$NSPID
ip link add name $LOCAL_IFNAME type veth peer name $GUEST_IFNAME
ip link set $LOCAL_IFNAME up

# Step 4: Adding the virtual interface to the bridge
ip link set $GUEST_IFNAME netns $NSPID
if [ "$VLANTAG" ]
then
    ovs-vsctl add-port $BRIDGE $LOCAL_IFNAME tag=$VLANTAG -- set Interface $LOCAL_IFNAME ofport_request=$OF_PORT
else
    ovs-vsctl add-port $BRIDGE $LOCAL_IFNAME -- set Interface $LOCAL_IFNAME ofport_request=$OF_PORT
fi

# Step 5: Configure networking within the container
ip netns exec $NSPID ip link set $GUEST_IFNAME name eth0
ip netns exec $NSPID ip addr add $IPADDR broadcast $BROADCAST dev eth0
ip netns exec $NSPID ifconfig eth0 hw ether $MAC
ip netns exec $NSPID ip addr add 127.0.0.1 dev lo
ip netns exec $NSPID ip link set eth0 up
ip netns exec $NSPID ip link set lo up
ip netns exec $NSPID ip route add default via $GWADDR
--- /dev/null
#!/bin/bash
# Reset the environment (containers + OVS state) and run the overlay POC
# against the controller.  Shebang added so "sudo ./<script>" works.
CONTROLLER=192.168.56.1
echo
echo "*** Removing containers... "
echo
sudo ./docker-clean.sh
echo
echo "*** Cleaning up OVS... "
sudo mn -c
echo
echo "Running POC script"
echo
sudo ./testOfOverlay.py --local s1 --controller ${CONTROLLER}
--- /dev/null
+#!/usr/bin/python
+
+import infrastructure_launch
+import odl_gbp
+#import mininet.cli
+import ipaddr
+import uuid
+import re
+import argparse, sys
+from config import *
+
def getSubnet(ip):
    """Return *ip*'s subnet in CIDR form using the first host address
    (the conventional gateway), e.g. '10.0.35.2/24' -> '10.0.35.1/24'."""
    network = ipaddr.IPv4Network(ip)
    return "{}/{}".format(network.network + 1, network.prefixlen)
+
if __name__ == '__main__':

    # Validate all parameters are present
    parser = argparse.ArgumentParser()
    parser.add_argument('--local',
                        help='Set up distributed mininet on local host with the specified switch')
    parser.add_argument('--policy', action='store_true',
                        help='Configure the policy on the controller')
    parser.add_argument('--controller', default='127.0.0.1',
                        help='Use the specified controller IP address')
    args = parser.parse_args()

    if (not args.local and not args.policy):
        parser.print_help()
        sys.exit(3)

    # switches is a list from config.py; when this script is called with
    # --local <switch> and that switch is present in config, it becomes the
    # one-element conf_switches list.
    conf_switches = []
    if args.local:
        for switch in switches:
            if switch['name'] == args.local:
                conf_switches = [switch]
                break

    # Assuming we have switches defined (and hence conf_switches), start the
    # infrastructure with the "hosts" list also from config.py.
    # NOTE(review): dpid is only bound inside this if-block, yet it is used
    # unconditionally in the get_ep() call below, and hosts' 'port' keys are
    # only set by launch() -- running with --policy but no matching --local
    # switch would raise NameError/KeyError there.  Confirm --policy is only
    # ever used together with --local.
    net = None
    if len(conf_switches) > 0:
        dpid=infrastructure_launch.launch(conf_switches, hosts, args.controller)
    try :
        if args.policy:
            for switch in switches:
                # This leverages a global from odl_gbp called "nodes", which appends "data" from this for loop
                odl_gbp.get_node_config(switch['dpid'], switch['tunnelIp'])
            #This also uses the global "nodes" from odl_gbp
            odl_gbp.register_nodes(args.controller)

        # TENANT, L3CTX, L2BD are imported from config.py
        # get_tenant looks for the TENANT UUID in a global tenant dictionary in odl_gbp.
        # If TENANT doesn't already exist in that dict. then a bunch of 'default' tenant data is defined, inluding
        # subjects and classifiers (at writing specific to HTTP source/dest and ICMP)
        tenant = odl_gbp.get_tenant(TENANT)

        # Layer3 context and Layer2 BridgeDomain are SET into the tenant{} structure in odl_gbp
        # TODO: (maybe call these set???)
        odl_gbp.get_l3c(TENANT, L3CTX)
        odl_gbp.get_bd(TENANT, L2BD, L3CTX)

        # subnets and fds (flood domains)
        subnets = {}
        fds = {}
        # hosts comes from config.py, which contains target switch, IP Address, MAC address, tenant and EPG
        for host in hosts:
            print host
            if args.local and host['switch'] != args.local:
                continue
            nw = ipaddr.IPv4Network(host['ip'])
            snet = "{}/{}".format(nw.network + 1, nw.prefixlen)
            router = "{}".format(nw.network + 1)

            if snet not in subnets:
                snid = str(uuid.uuid4())
                fdid = str(uuid.uuid4())
                # Sets flood domain where parent is L2BD from config.py
                fds[fdid] = odl_gbp.get_fd(TENANT, fdid, L2BD)

                # sets subnet from tenant, which also includes the flood domain
                subnets[snet] = odl_gbp.get_subnet(TENANT, snid, fdid, snet, router)

            # Sets the "network-domain" in global endpointGroups dict in odl_gbp.py
            # NOTE(review): snid is only refreshed when a *new* subnet is
            # created; for a host whose subnet was created on an earlier
            # iteration this assigns the uuid of the most recently created
            # subnet, not the host's own -- likely should be
            # subnets[snet]["id"].  Confirm before relying on multi-subnet
            # configs.
            odl_gbp.get_epg(TENANT, host['endpointGroup'])["network-domain"] = snid

            # Creates EP information and appends to endpoint list, a global
            odl_gbp.get_ep(TENANT,
                           host['endpointGroup'],
                           L3CTX,
                           re.sub(r'/\d+$', '', host['ip']),
                           L2BD,
                           host['mac'],
                           dpid,
                           host['port'])

        # contracts is a global list from config.py.
        # get_contract creates the specific subject, classifiers, rules etc for the contract
        # and appends this to the global tenant list.
        for contract in contracts:
            odl_gbp.get_contract(TENANT,
                                 contract['provider'], contract['consumer'],
                                 contract['id'])

        # POST to the controller to register tenants
        if args.policy:
            odl_gbp.register_tenants(args.controller)

        # POST to controller to register EPS
        # TODO: Should this be done on a per Tenant basis
        odl_gbp.register_eps(args.controller)

        # NOTE(review): net is never assigned anything but None and the
        # mininet.cli import is commented out above, so this branch and the
        # finally-stop below are dead code in the current script.
        if net is not None:
            mininet.cli.CLI(net)
    finally:
        if net is not None:
            net.stop()