--- /dev/null
+GroupBasedPolicy Proof Of Concept Scripts
+
+1. Introduction
+
+This GroupBasedPolicy "Proof of Concept" (demo) uses:
+- Two Ubuntu 14.04 LTS VMs on a host-net (using VirtualBox)
+- Docker 1.0.1 (for guests)
+- Open vSwitch 2.3.0 (running natively on Ubuntu 14.04 LTS)
+
+It mimics the policy of the mininet example, that is:
+- Two EndpointGroups (EPGs):
+  1. Clients
+  2. WebServers
+- 4 guests per EPG
+- A contract allowing HTTP from Clients to WebServers and ICMP (ping) in both
+  directions, but disallowing HTTP from WebServers to Clients
+
+2. Files
+- infrastructure_config:
+  Configuration of OVS and the docker guests. There is a default container image, but this can be overridden per guest.
+- policy_config:
+  The policy is set here. Note that this file needs to be present on both VMs.
+- infrastructure_launch:
+  Launches the docker containers and configures the switches.
+- odl_gbp:
+  Library of functions for performing operations on GBP policy entities.
+- testOfOverlay:
+  Processes the policy and guests. As with the mininet POC, exactly one of the VMs must launch this script with "--policy".
+- start-poc.sh:
+  Cleans up any existing docker and OVS instances, using "mn -c" for a quick OVS clean-up. (If mininet is not available on your VM, stop OVS, remove conf.db, and restart OVS instead.)
+
+3. Usage
+- Always run as root (e.g. "sudo bash").
+- Edit infrastructure_config.py with the IP address of the VM for each switch, and edit start-poc.sh with your ODL controller IP.
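+
+For example, assuming the controller is at 192.168.56.1 and this VM hosts
+switch s1 (the addresses here are illustrative):
+
+  sudo bash
+  ./start-poc.sh
+
+or, to launch the local infrastructure and push the policy in one step:
+
+  ./testOfOverlay.py --local s1 --policy --controller 192.168.56.1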
+
+
--- /dev/null
+#!/bin/bash
+# Stop (with a 1s grace period) and remove all containers.
+docker stop -t=1 $(docker ps -a -q)
+docker rm $(docker ps -a -q)
+
+# Quick OVS clean-up via mininet
+mn -c
--- /dev/null
+# Config for the switches; tunnelIp is the local IP address of the VM hosting the switch.
+switches = [{'name': 's1',
+ 'tunnelIp': '192.168.56.30',
+ 'dpid': '1'},
+ {'name': 's2',
+ 'tunnelIp': '192.168.56.32',
+ 'dpid': '2'}]
+
+defaultContainerImage='alagalah/odlpoc_ovs230'
+
+#Note that tenant name and endpointGroup name come from policy_config.py
+
+hosts = [{'name': 'h35_2',
+          'ip': '10.0.35.2/24',
+          'mac': '00:00:00:00:35:02',
+          'switch': 's1',
+          'tenant': 'GBPPOC',
+          'endpointGroup': 'client'},
+ {'name': 'h35_3',
+ 'ip': '10.0.35.3/24',
+ 'mac': '00:00:00:00:35:03',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h35_4',
+ 'ip': '10.0.35.4/24',
+ 'mac': '00:00:00:00:35:04',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h35_5',
+ 'ip': '10.0.35.5/24',
+ 'mac': '00:00:00:00:35:05',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'client'},
+ {'name': 'h36_2',
+ 'ip': '10.0.36.2/24',
+ 'mac': '00:00:00:00:36:02',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_3',
+ 'ip': '10.0.36.3/24',
+ 'mac': '00:00:00:00:36:03',
+ 'switch': 's1',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_4',
+ 'ip': '10.0.36.4/24',
+ 'mac': '00:00:00:00:36:04',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'},
+ {'name': 'h36_5',
+ 'ip': '10.0.36.5/24',
+ 'mac': '00:00:00:00:36:05',
+ 'switch': 's2',
+ 'tenant': 'GBPPOC',
+ 'endpointGroup': 'webserver'}]
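+
+# A host entry may optionally carry a 'container_image' key to override
+# defaultContainerImage for that guest (the image name below is hypothetical):
+#
+# {'name': 'h35_6',
+#  'ip': '10.0.35.6/24',
+#  'mac': '00:00:00:00:35:06',
+#  'switch': 's1',
+#  'tenant': 'GBPPOC',
+#  'endpointGroup': 'client',
+#  'container_image': 'myrepo/myimage'}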
+
--- /dev/null
+#!/usr/bin/python
+
+import sys
+import ipaddr
+from subprocess import call
+from subprocess import check_output
+from infrastructure_config import *
+
+def addController(sw, ip):
+ call(['ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip ])
+
+def addSwitch(name, dpid=None):
+    call(['ovs-vsctl', 'add-br', name]) # Add bridge
+    if dpid:
+        if len(dpid) < 16: # DPID must be 16 hex digits (64 bits) in later versions of OVS
+            dpid = dpid.zfill(16) # Pad with leading zeros
+        elif len(dpid) > 16:
+            print 'DPID: %s is too long' % dpid
+            sys.exit(3)
+        call(['ovs-vsctl', 'set', 'bridge', name, 'other-config:datapath-id=%s' % dpid])
+
+def addHost(net, switch, name, ip, mac):
+    # Unused helper; containers are launched directly from launch() below.
+    pass
+
+def setOFVersion(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
+ call(['ovs-vsctl', 'set', 'bridge', sw, 'protocols={}'.format(version)])
+
+def addTunnel(sw, sourceIp=None):
+ ifaceName = '{}_vxlan0'.format(sw)
+ cmd = ['ovs-vsctl', 'add-port', sw, ifaceName,
+ '--', 'set', 'Interface', ifaceName,
+ 'type=vxlan',
+ 'options:remote_ip=flow',
+ 'options:key=flow']
+ if sourceIp is not None:
+ cmd.append('options:source_ip={}'.format(sourceIp))
+ call(cmd)
+
+def launchContainer(host, containerImage):
+    # docker run -d --net=none --name={name} -h {name} -t -i --privileged=True {image} /bin/bash
+    containerID = check_output(['docker', 'run', '-d', '--net=none',
+                                '--name=%s' % host['name'], '-h', host['name'],
+                                '-t', '-i', '--privileged=True',
+                                containerImage, '/bin/bash'])
+    return containerID[:-1] # Strip the trailing newline from docker's output
+
+def connectContainerToSwitch(sw, host, containerID, of_port):
+    hostIP = host['ip']
+    mac = host['mac']
+    nw = ipaddr.IPv4Network(hostIP)
+    broadcast = "{}".format(nw.broadcast)
+    router = "{}".format(nw.network + 1)
+    cmd = ['./ovswork.sh', sw, containerID, hostIP, broadcast, router, mac, of_port]
+    if 'vlan' in host:
+        cmd.append(host['vlan'])
+    call(cmd)
+
+
+def launch(switches, hosts, contIP='127.0.0.1'):
+
+    for sw in switches:
+        dpid = sw['dpid']
+        addSwitch(sw['name'], dpid)
+        addTunnel(sw['name'], sw['tunnelIp'])
+
+        ports = 0
+        for host in hosts:
+            if host['switch'] == sw['name']:
+                ports += 1
+                containerImage = defaultContainerImage # from config
+                if 'container_image' in host: # per-host override from config
+                    containerImage = host['container_image']
+                containerID = launchContainer(host, containerImage)
+                connectContainerToSwitch(sw['name'], host, containerID, str(ports))
+                host['port'] = str(ports) # TODO: find a more elegant way to hand the port back
+
+
+ # This is a workaround for a bug encountered during
+ # the Helium release. Setting the vSwitch from 1.0
+ # to 1.3 while it was connected to the controller
+ # exposed a bug in the openflowplugin, which resulted
+ # in the controller missing some of the ports on the
+ # vswitch. This change avoids the bug by switching
+ # the version before connecting the switch to the
+ # controller.
+ setOFVersion(sw['name'])
+ addController(sw['name'], contIP)
+
+ return dpid
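+
+# Example (a sketch): launch the first switch from infrastructure_config
+# together with its hosts, pointing at an example controller IP:
+#
+#   launch([switches[0]], hosts, contIP='192.168.56.1')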
+
--- /dev/null
+
+import requests,json
+import uuid
+from requests.auth import HTTPBasicAuth
+
+USERNAME='admin'
+PASSWORD='admin'
+REGISTER_EP_URL="http://%s:8181/restconf/operations/endpoint:register-endpoint"
+REGISTER_TENANTS_URL="http://%s:8181/restconf/config/policy:tenants"
+REGISTER_NODES_URL="http://%s:8181/restconf/config/opendaylight-inventory:nodes"
+
+endpointGroups = {}
+
+def get_epg(tenantId, epgId):
+ k = "{}|{}".format(tenantId,epgId)
+ if k in endpointGroups:
+ return endpointGroups[k]
+    tenant = get_tenant(tenantId)
+ data = {
+ "id": epgId,
+ "consumer-named-selector": [],
+ "provider-named-selector": []
+ }
+ tenant["endpoint-group"].append(data)
+ endpointGroups[k] = data
+ return data
+
+tenants = {}
+
+def initialize_tenant(tenant):
+    # All tenants must have a unique ID
+    if 'id' not in tenant:
+        print "No ID, initializing"
+        tenant['id'] = str(uuid.uuid4())
+
+    # If the tenant has already been initialized, assume that the stored copy
+    # in the tenants dict is more up to date.
+ if tenant['id'] in tenants:
+ return tenants[tenant['id']]
+
+ # Dictionary items that must exist
+ data = {
+ "l3-context": [],
+ "l2-bridge-domain": [],
+ "l2-flood-domain": [],
+ "subnet": [],
+ "endpoint-group": [],
+ "contract": [],
+ "subject-feature-instances": {}
+ }
+
+    # Merge the base data dictionary with the passed tenant dictionary; entries
+    # from tenant override those in data (reverse the order to prefer data).
+    mergedData = dict(data.items() + tenant.items())
+ tenants[mergedData['id']] = mergedData
+ return mergedData
+
+def get_tenant(tenantId):
+ if tenantId in tenants:
+ return tenants[tenantId]
+ data = {
+ "id": tenantId,
+ "l3-context": [],
+ "l2-bridge-domain": [],
+ "l2-flood-domain": [],
+ "subnet": [],
+ "endpoint-group": [],
+ "contract": [],
+ "subject-feature-instances": {}
+ }
+ tenants[tenantId] = data
+ return data
+
+subnets = {}
+
+def get_fd(tenantId, fdId, parent):
+ tenant = get_tenant(tenantId)
+ data = {"id": fdId,
+ "parent": parent}
+ tenant["l2-flood-domain"].append(data)
+ return data
+
+def get_bd(tenantId, bdId, parent):
+ tenant = get_tenant(tenantId)
+ data = {"id": bdId,
+ "parent": parent}
+ tenant["l2-bridge-domain"].append(data)
+ return data
+
+def get_l3c(tenantId, l3cId):
+ tenant = get_tenant(tenantId)
+ data = {"id": l3cId}
+ tenant["l3-context"].append(data)
+ return data
+
+def get_subnet(tenantId, subnetId, parent, prefix, router):
+ k = "{}|{}".format(tenantId, subnetId)
+ if k in subnets:
+ return subnets[k]
+ tenant = get_tenant(tenantId)
+ data = {"id": subnetId,
+ "parent": parent,
+ "ip-prefix": prefix,
+ "virtual-router-ip": router}
+ tenant["subnet"].append(data)
+ return data
+
+endpoints = []
+
+def get_ep(tenantId, groupId, l3ctx, ip, l2ctx, mac, sw, port):
+ group = get_epg(tenantId, groupId)
+ data = {"tenant": tenantId,
+ "endpoint-group": groupId,
+ "l2-context": l2ctx,
+ "mac-address": mac,
+ "l3-address": [{"l3-context": l3ctx,
+ "ip-address": ip}],
+ "ofoverlay:node-id": "openflow:{}".format(sw),
+ "ofoverlay:node-connector-id": "openflow:{}:{}".format(sw, port)
+ }
+ endpoints.append(data)
+ return data
+
+nodes = []
+
+def get_node_config(sw, tun_ip):
+ data = {
+ "id": "openflow:{}".format(sw),
+ "ofoverlay:tunnel-ip": tun_ip
+ }
+ nodes.append(data)
+ return data
+
+def get_contract(tenantId, pgroupIds, cgroupIds, contract):
+# TODO: This assumes a single provider/consumer per contract. It should be able to process a
+# list; not entirely sure whether everything should be repeated or just the IDs. For now, single.
+ tenant = get_tenant(tenantId)
+ pgroup = get_epg(tenantId, pgroupIds[0])
+ cgroup = get_epg(tenantId, cgroupIds[0])
+
+    if 'id' not in contract:
+        contract['id'] = str(uuid.uuid4())
+    # The tenant's contract construct has no notion of "name", so work on a copy
+    # of the contract dict with the name removed
+    data = dict(contract)
+    del data['name']
+
+ tenant["contract"].append(data)
+ cgroup["consumer-named-selector"].append({
+ "name": "{}-{}-{}".format(pgroupIds[0], cgroupIds[0], data['id']),
+ "contract": [data['id']]
+ })
+ pgroup["provider-named-selector"].append({
+ "name": "{}-{}-{}".format(pgroupIds[0], cgroupIds[0], data['id']),
+ "contract": [data['id']]
+ })
+
+ return data
+
+def post(url, data):
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ print "POST %s" % url
+ print json.dumps(data, indent=4, sort_keys=True)
+ r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
+ print r.text
+ r.raise_for_status()
+
+def put(url, data):
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ print "PUT %s" % url
+ print json.dumps(data, indent=4, sort_keys=True)
+ r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
+ print r.text
+ r.raise_for_status()
+
+def register_tenants(contHost):
+ data = {"policy:tenants": {"tenant": tenants.values()}}
+ put(REGISTER_TENANTS_URL % contHost, data)
+
+def register_eps(contHost):
+ for ep in endpoints:
+ data = {"input": ep}
+ post(REGISTER_EP_URL % contHost, data)
+
+def register_nodes(contHost):
+ data = {"opendaylight-inventory:nodes": {"node": nodes}}
+ put(REGISTER_NODES_URL % contHost, data)
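+
+# Typical call order (a sketch; the controller address is illustrative):
+#
+#   tenant = initialize_tenant({'name': 'GBPPOC'})
+#   get_epg(tenant['id'], epgId); get_contract(...); get_ep(...); get_node_config(...)
+#   register_nodes('192.168.56.1')
+#   register_tenants('192.168.56.1')
+#   register_eps('192.168.56.1')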
--- /dev/null
+#!/bin/bash
+set -e
+
+BRIDGE=$1
+GUESTNAME=$2
+IPADDR=$3
+BROADCAST=$4
+GWADDR=$5
+MAC=$6
+OF_PORT=$7
+VLANTAG=$8
+
+[ "$IPADDR" ] || {
+ echo "Syntax:"
+ echo "pipework <hostinterface> <guest> <ipaddr>/<subnet> <broadcast> <gateway> [vlan tag]"
+ exit 1
+}
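+
+# Example invocation (values are illustrative; this script is normally called
+# from infrastructure_launch.py's connectContainerToSwitch):
+#   ./ovswork.sh s1 <container-id> 10.0.35.2/24 10.0.35.255 10.0.35.1 00:00:00:00:35:02 1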
+
+# Step 1: Find the guest (for now, we only support LXC containers)
+while read dev mnt fstype options dump fsck
+do
+ [ "$fstype" != "cgroup" ] && continue
+ echo $options | grep -qw devices || continue
+ CGROUPMNT=$mnt
+done < /proc/mounts
+
+[ "$CGROUPMNT" ] || {
+ echo "Could not locate cgroup mount point."
+ exit 1
+}
+
+N=$(find "$CGROUPMNT" -name "$GUESTNAME*" | wc -l)
+case "$N" in
+ 0)
+ echo "Could not find any container matching $GUESTNAME."
+ exit 1
+ ;;
+ 1)
+ true
+ ;;
+ *)
+ echo "Found more than one container matching $GUESTNAME."
+ exit 1
+ ;;
+esac
+
+NSPID=$(head -n 1 $(find "$CGROUPMNT" -name "$GUESTNAME*" | head -n 1)/tasks)
+[ "$NSPID" ] || {
+ echo "Could not find a process inside container $GUESTNAME."
+ exit 1
+}
+
+# Step 2: Prepare the working directory
+mkdir -p /var/run/netns
+rm -f /var/run/netns/$NSPID
+ln -s /proc/$NSPID/ns/net /var/run/netns/$NSPID
+
+# Step 3: Create the virtual interface pair
+LOCAL_IFNAME=vethl$NSPID
+GUEST_IFNAME=vethg$NSPID
+ip link add name $LOCAL_IFNAME type veth peer name $GUEST_IFNAME
+ip link set $LOCAL_IFNAME up
+
+# Step 4: Add the virtual interface to the bridge
+ip link set $GUEST_IFNAME netns $NSPID
+if [ "$VLANTAG" ]
+then
+ ovs-vsctl add-port $BRIDGE $LOCAL_IFNAME tag=$VLANTAG -- set Interface $LOCAL_IFNAME ofport_request=$OF_PORT
+else
+ ovs-vsctl add-port $BRIDGE $LOCAL_IFNAME -- set Interface $LOCAL_IFNAME ofport_request=$OF_PORT
+fi
+
+# Step 5: Configure networking within the container
+ip netns exec $NSPID ip link set $GUEST_IFNAME name eth0
+ip netns exec $NSPID ip addr add $IPADDR broadcast $BROADCAST dev eth0
+ip netns exec $NSPID ifconfig eth0 hw ether $MAC
+ip netns exec $NSPID ip addr add 127.0.0.1/8 dev lo
+ip netns exec $NSPID ip link set eth0 up
+ip netns exec $NSPID ip link set lo up
+ip netns exec $NSPID ip route add default via $GWADDR
--- /dev/null
+L3CTX='cbe0cc07-b8ff-451d-8171-9eef002a8e80'
+L2BD='7b796915-adf4-4356-b5ca-de005ac410c1'
+# Only one tenant supported at this time.
+tenants = [
+ {'name':'GBPPOC',
+ 'id':'f5c7d344-d1c7-4208-8531-2c2693657e12', #Optional, if you leave this out will be generated
+ 'subject-feature-instances':
+ {'classifier-instance':
+ [
+ {'name': 'http-dest',
+ 'classifier-definition-id': '4250ab32-e8b8-445a-aebb-e1bd2cdd291f',
+ 'parameter-value': [
+ {'name': 'type',
+ 'string-value': 'TCP'},
+ {'name': 'destport',
+ 'int-value': '80'}
+ ]},
+ {'name': 'http-src',
+ 'classifier-definition-id': '4250ab32-e8b8-445a-aebb-e1bd2cdd291f',
+ 'parameter-value': [
+ {'name': 'type',
+ 'string-value': 'TCP'},
+ {'name': 'sourceport',
+ 'int-value': '80'}
+ ]},
+ {'name': 'icmp',
+ 'classifier-definition-id': '79c6fdb2-1e1a-4832-af57-c65baf5c2335',
+ 'parameter-value': [
+ {'name': 'proto',
+ 'int-value': '1'}
+ ]
+ }
+ ]
+ }
+ }
+ ]
+
+contracts = [
+ {'name':'pingall+web',
+ 'id':'22282cca-9a13-4d0c-a67e-a933ebb0b0ae',
+ 'subject': [
+ {'name': 'allow-http-subject',
+ 'rule': [
+ {'name': 'allow-http-rule',
+ 'classifier-ref': [
+ {'name': 'http-dest',
+ 'direction': 'in'},
+ {'name': 'http-src',
+ 'direction': 'out'}
+ ]
+ }
+ ]
+ },
+ {'name': 'allow-icmp-subject',
+ 'rule': [
+ {'name': 'allow-icmp-rule',
+ 'classifier-ref': [
+ {'name': 'icmp'}
+ ]}
+ ]
+ }],
+ 'clause': [
+ {'name': 'allow-http-clause',
+ 'subject-refs': [
+ 'allow-http-subject',
+ 'allow-icmp-subject'
+ ]
+ }
+ ]
+ }]
+endpointGroups = [
+ {'name':'client',
+ 'providesContracts' : [], #List of contract names provided
+ 'consumesContracts' : ['pingall+web'],
+ 'id' : '1eaf9a67-a171-42a8-9282-71cf702f61dd', #Optional, if you leave this out will be generated
+ },
+ {'name':'webserver',
+ 'providesContracts' : ['pingall+web'], #List of contract names provided
+ 'consumesContracts' : [],
+ 'id' : 'e593f05d-96be-47ad-acd5-ba81465680d5', #Optional, if you leave this out will be generated
+ }
+ ]
--- /dev/null
+#!/bin/bash
+CONTROLLER=192.168.56.1
+echo
+echo "*** Removing containers... "
+echo
+./docker-clean.sh
+echo
+echo "*** Cleaning up OVS... "
+mn -c
+echo
+echo "Pulling alagalah/odlpoc_ovs230 docker image...edit script for own images"
+echo
+docker pull alagalah/odlpoc_ovs230
+echo
+echo "Running POC script"
+echo
+./testOfOverlay.py --local s1 --controller ${CONTROLLER}
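+
+# On exactly one of the VMs, also push the policy (see the README); e.g.:
+#   ./testOfOverlay.py --local s1 --policy --controller ${CONTROLLER}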
--- /dev/null
+#!/usr/bin/python
+
+import infrastructure_launch
+import odl_gbp
+import ipaddr
+import uuid
+import re
+import argparse, sys
+import policy_config
+import infrastructure_config
+
+def getSubnet(ip):
+ nw = ipaddr.IPv4Network(ip)
+ return "{}/{}".format(nw.network + 1, nw.prefixlen)
+
+if __name__ == '__main__':
+
+ # Validate all parameters are present
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--local',
+ help='Set up distributed mininet on local host with the specified switch')
+ parser.add_argument('--policy', action='store_true',
+ help='Configure the policy on the controller')
+ parser.add_argument('--controller', default='127.0.0.1',
+ help='Use the specified controller IP address')
+ args = parser.parse_args()
+
+ if (not args.local and not args.policy):
+ parser.print_help()
+ sys.exit(3)
+
+    # "switches" is a list from infrastructure_config.py; these are the OVS instances.
+ conf_switches = []
+ if args.local:
+ for switch in infrastructure_config.switches:
+ if switch['name'] == args.local:
+ conf_switches = [switch]
+ break
+
+    # Assuming we have switches defined (and hence conf_switches), start containers
+    # with the "hosts" list, also from infrastructure_config.py
+    dpid = None # Stays None when run with --policy only
+    if len(conf_switches) > 0:
+        dpid = infrastructure_launch.launch(conf_switches, infrastructure_config.hosts, args.controller)
+
+ if args.policy:
+ for switch in infrastructure_config.switches:
+            # get_node_config() appends to the global "nodes" list in odl_gbp
+            odl_gbp.get_node_config(switch['dpid'], switch['tunnelIp'])
+        # register_nodes() pushes that same global "nodes" list to the controller
+ odl_gbp.register_nodes(args.controller)
+
+ #Only one tenant supported today
+ tenant = policy_config.tenants[0]
+ tenant = odl_gbp.initialize_tenant(tenant)
+    if len(tenant['l3-context']) == 0:
+        print "Setting L3 context"
+        odl_gbp.get_l3c(tenant['id'], policy_config.L3CTX)
+    l3context = tenant['l3-context'][0]['id']
+    if len(tenant['l2-bridge-domain']) == 0:
+        print "Setting L2 bridge domain"
+        odl_gbp.get_bd(tenant['id'], policy_config.L2BD, l3context)
+    l2bridgeDomain = tenant['l2-bridge-domain'][0]['id']
+ # subnets and fds (flood domains)
+ subnets = {}
+ fds = {}
+ # hosts comes from infrastructure_config.py, which contains target switch, IP Address, MAC address, tenant and EPG
+ for host in infrastructure_config.hosts:
+ if args.local and host['switch'] != args.local:
+ continue
+        nw = ipaddr.IPv4Network(host['ip'])
+        snet = getSubnet(host['ip'])
+        router = "{}".format(nw.network + 1)
+
+ if snet not in subnets:
+ snid = str(uuid.uuid4())
+ fdid = str(uuid.uuid4())
+ # Sets flood domain where parent is L2BD from config.py
+ fds[fdid] = odl_gbp.get_fd(tenant['id'], fdid, l2bridgeDomain)
+
+ # sets subnet from tenant, which also includes the flood domain
+ subnets[snet] = odl_gbp.get_subnet(tenant['id'], snid, fdid, snet, router)
+ # Sets the "network-domain" in global endpointGroups dict in odl_gbp.py
+
+ for endpointGroup in policy_config.endpointGroups:
+ if host['endpointGroup'] == endpointGroup['name']:
+ groupId=endpointGroup['id']
+ odl_gbp.get_epg(tenant['id'], groupId)["network-domain"] = snid
+
+ # Creates EP information and appends to endpoint list, a global
+ odl_gbp.get_ep(tenant['id'],
+ groupId,
+ l3context,
+ re.sub(r'/\d+$', '', host['ip']),
+ l2bridgeDomain,
+ host['mac'],
+ dpid,
+ host['port'])
+
+ # Resolve contract names to IDs and add to policy
+ contractConsumerEpgIDs=[]
+ contractProviderEpgIDs=[]
+ for contract in policy_config.contracts:
+ for endpointGroup in policy_config.endpointGroups:
+ if contract['name'] in endpointGroup['consumesContracts']:
+ contractConsumerEpgIDs.append(endpointGroup['id'])
+ if contract['name'] in endpointGroup['providesContracts']:
+ contractProviderEpgIDs.append(endpointGroup['id'])
+
+ odl_gbp.get_contract(tenant['id'],
+ contractProviderEpgIDs,
+ contractConsumerEpgIDs,
+ contract)
+
+ # POST to the controller to register tenants
+ if args.policy:
+ odl_gbp.register_tenants(args.controller)
+
+ # POST to controller to register EPS
+ odl_gbp.register_eps(args.controller)
+