get-nsps.py
infrastructure_config.py
demo.lock
+SF.vmdk
control.vm.network "private_network", ip: "#{neutron_ex_ip}", virtualbox__intnet: "mylocalnet"
control.vm.provider :virtualbox do |vb|
vb.memory = 4096
+ vb.cpus = 2
end
control.vm.provider "vmware_fusion" do |vf|
vf.vmx["memsize"] = "4096"
compute.vm.network "private_network", ip: "192.168.111.12", virtualbox__intnet: "mylocalnet"
compute.vm.provider :virtualbox do |vb|
vb.memory = 4096
+ vb.cpus = 3
end
compute.vm.provider "vmware_fusion" do |vf|
vf.vmx["memsize"] = "4096"
--- /dev/null
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAx+Qraf8Q9eeH/jrbVCUHS+t2zwVl11DwIidToYqsVV2nrulk
+tFaj9uhKqFJT+oLG7gH1Z4TPZL7RSsFwvXqNNu6nJBo9p6AbrzhGOeyM7w32bR3F
+8x41wNZpMT1bTw62xSNAJ/zsB8bRbxgzHciI2A9SaBBrSS1eZEYGqeSx2cr9zsbf
+wOZea9mGd4l8Mw3pPp5y6hM5MqEMhY9S21pGODH9eonXdpoNMyLanjXwvbAEWLij
+stMiFY7scSzh6SvE2tXxXAfD8TFpTUWsbNk6TwtaT8N6wdcIMkZNsKOR/cJeCImV
+dRa4JW1C2C9WtgmKNSyCGV2DcjXU9voeyrwKXwIDAQABAoIBAFblX+IiWfMshbgF
+SheYSUW9xNZWWGFz9BBE3FxbdnNd1Wl6WKb1Cy5o6RunPo6mj2gkPdSUrbv0H/PJ
+iAM25XC96rcNGURnm+FeK1k+9j1t6nSg/s6jkWaVTwFy1ODXWqj0nA1GLC1tRtpu
+wLnv1V1nu8E1cg5LxL7+miL9Iju0+EghgjbeINT/VxQam6305onZ1Vc1uHtQiCC1
+XTzJzDB6vy1+gFlr8UuX9uuVpApL6Sk6D0kx3cIP7JZzu8coit+Z7AmXKKnd4mFV
+nqS2wGFDX0WBO52uVfxtfvvZC/WmSeuFfXD3+lFu7HmZVeh+5WsGAv7DKuOK6Z7l
+idSisQECgYEA58oVeFhg+orlAeKnGGFLI9zHnSbWUKS978c6jGAL45kZ0I8+6TXK
+t3b3UtbEEi7Cn0ulsw90pE8Gq3uTVgNwZRXIu/SzsW2Sbw/E4mYdDTrN8WguCiex
+0Ffr3d0oUt9GZSXDucKFQ8FSRCxTvobwNYXuqXxlzB7xzHdyfsWlZMECgYEA3MUm
+DVCwKhK73zcmNxoW+s3J+ZHlu5lTYixRz4B4/Hh3Q6aJptF5v+CRgy64aOa8McTa
+yxXDdoikaKeHqb1Uaq1N8bXerppZhDrzf+j2UjUciR5/z3bHSR3NzKKnQoJm6e97
+3K6MP9k2dKXNngaxQp/KnVGz5VfZpq660YwHlx8CgYEAhWZi0O5cchV6E4m0jW2B
+Z1RrdeCiO6t0vQk2gw/MB+NXXhbzKBkTYF+RnBWKpsROsotBBDsC8IWi2xK2CyzT
+hPnym2oL+TOMsWqr73jBg5myaRuCU8ngpn3Te88KpBdoVlJTwRdYx1P92sqAMAlN
+OiCo6NZ1Z/LvSrP8K0XpGIECgYAy5Yy5QyHqATmo42j/CRfFKI5o6BbjohyUJI2T
+3hWGdytfwFQ5zk9YSiih6rJ/FLrMaoraiSDwAd6NdhoTqt98XIn8sKYu8My0bHbY
+xVeMakwy3IIwzTxygdmBVPpknfWl9x1CKkeRLL2eNN9rkDVrm7U1gbGMrS0zfVL6
+nmLdlwKBgAYKWVD3UfEwKHG7sxqUrJG7ibbvTMhng79tCnXSNtFnT2YYI3CaxI2n
+eb9e7FQeFdtn7XYzcKYXcFFNU761hZ5kKgY7TN/rucPitpQKao6Xs8wTgJSgnE/j
+XlRvsTdBKcfgmSv0rtkNkAJp2C0LjgtBv2ATQBIg4/G88K27+tsV
+-----END RSA PRIVATE KEY-----
--- /dev/null
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDH5Ctp/xD154f+OttUJQdL63bPBWXXUPAiJ1OhiqxVXaeu6WS0VqP26EqoUlP6gsbuAfVnhM9kvtFKwXC9eo027qckGj2noBuvOEY57IzvDfZtHcXzHjXA1mkxPVtPDrbFI0An/OwHxtFvGDMdyIjYD1JoEGtJLV5kRgap5LHZyv3Oxt/A5l5r2YZ3iXwzDek+nnLqEzkyoQyFj1LbWkY4Mf16idd2mg0zItqeNfC9sARYuKOy0yIVjuxxLOHpK8Ta1fFcB8PxMWlNRaxs2TpPC1pPw3rB1wgyRk2wo5H9wl4IiZV1FrglbULYL1a2CYo1LIIZXYNyNdT2+h7KvApf vagrant@devstack-control
--- /dev/null
+ 397  sudo ovs-vsctl del-port vxlangpe-br-int
+ 398 sudo ovs-vsctl del-port vxlan-br-int
+ 399 sudo ovs-vsctl show
+ 400 sudo ovs-dpctl dump-flows
+ 401 dumpflows.sh | grep nsp
+ 402 sudo ovs-dpctl dump-flows
+ 403 ip a
+ 404 sudo ip route add 11.0.0.3/32 dev tap20536c64-c7
+ 405 sudo arp -i tap20536c64-c7 -s 11.0.0.3 fa:16:3e:64:0d:e6
+ 406 sudo ovs-dpctl dump-flows
+ 407 clear
+ 408 sudo ovs-dpctl dump-flows
+ 409 clear;sudo ovs-dpctl dump-flows
+ 410 ip -o a
+ 411 ip a
+ 412 history
+ 413 clear;sudo ovs-dpctl dump-flows
+ 414 resetcontroller.sh
+ 415 history | grep route
+ 416 sudo ip route add 11.0.0.3/32 dev tap20536c64-c7
+ 417 sudo ip route del 11.0.0.3/32 dev tap20536c64-c7
+ 418 sudo ovs-dpctl dump-flows
+ 419 sudo ifdown eth1
+ 420 sudo ifdown tap20536c64-c7
+ 421 ip -o a
+ 422 sudo ovs-dpctl dump-flows
+ 423 clear
+ 424 sudo ovs-dpctl dump-flows
+ 425 clear
+ 426 sudo ovs-dpctl dump-flows
+ 427 clear
+ 428 sudo ovs-dpctl dump-flows
+ 429 route -n
+ 430 history
+
--- /dev/null
+sudo ovs-vsctl del-controller br-int
+sudo ovs-vsctl del-manager
+sudo ovs-vsctl show
--- /dev/null
+#!/usr/bin/python
+# Pretty-printer for OVS datapath flows: runs "ovs-dpctl dump-flows" and
+# prints one flow entry per chunk.
+# NOTE(review): Python 2 syntax (print statement) -- will not run under Python 3.
+
+from subprocess import check_output
+
+
+def call_dpctl():
+ # Run "sudo ovs-dpctl dump-flows" and return its raw stdout.
+ # check_output raises CalledProcessError if the command exits non-zero.
+ cmd="sudo ovs-dpctl dump-flows"
+ listcmd=cmd.split()
+ return check_output(listcmd)
+
+if __name__ == "__main__" :
+ # Datapath flow entries begin with "recirc_id", so splitting on that
+ # token yields roughly one list element per flow.
+ flows=call_dpctl().split("recirc_id")
+ for flow in flows:
+ print flow
+
+
+
--- /dev/null
+sudo ovs-ofctl dump-flows br-int -OOpenFlow13
--- /dev/null
+source openrc admin admin
+glance image-create --name sf --disk-format vmdk --container-format bare --is-public True < /vagrant/SF.vmdk
+openstack flavor create custom --ram 1024 --disk 15 --public
--- /dev/null
+template_name: test-vnfd
+description: firewall-example
+
+service_properties:
+ Id: firewall-vnfd
+ vendor: tacker
+ version: 1
+ type:
+ - firewall
+vdus:
+ vdu1:
+ id: vdu1
+ vm_image: sf
+ instance_type: custom
+ service_type: firewall
+
+ network_interfaces:
+ management:
+ network: net_mgmt
+ management: true
+
+ placement_policy:
+ availability_zone: az-compute
+
+ auto-scaling: noop
+ monitoring_policy: noop
+ failure_policy: respawn
+
+ config:
+ param0: key0
+ param1: key1
--- /dev/null
+#!/usr/bin/python
+import argparse
+import requests, json
+from requests.auth import HTTPBasicAuth
+from subprocess import call
+import time
+import sys
+import os
+
+DEFAULT_PORT = '8181'
+
+USERNAME = 'admin'
+PASSWORD = 'admin'
+
+OPER_NODES = '/restconf/operational/opendaylight-inventory:nodes/'
+CONF_TENANT = '/restconf/config/policy:tenants'
+
+
+def get(host, port, uri):
+ '''Perform a GET rest operation and return the response body parsed as JSON.'''
+ # NOTE(review): unlike put()/post() there is no r.raise_for_status() here;
+ # a non-JSON HTTP error page will surface as a ValueError from json.loads.
+ url = 'http://' + host + ":" + port + uri
+ # print url
+ r = requests.get(url, auth=HTTPBasicAuth(USERNAME, PASSWORD))
+ jsondata = json.loads(r.text)
+ return jsondata
+
+
+def put(host, port, uri, data, debug=False):
+ '''Perform a PUT rest operation, using the URL and data provided'''
+
+ # data is serialized as JSON; ODL RESTCONF expects the yang.data+json type.
+ url = 'http://' + host + ":" + port + uri
+
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ if debug == True:
+ print "PUT %s" % url
+ print json.dumps(data, indent=4, sort_keys=True)
+ r = requests.put(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
+ if debug == True:
+ print r.text
+ # Fail loudly on HTTP 4xx/5xx responses.
+ r.raise_for_status()
+
+
+def post(host, port, uri, data, debug=False):
+ '''Perform a POST rest operation, using the URL and data provided'''
+
+ # Mirrors put() above: JSON-serialized body, yang.data+json content type.
+ url = 'http://' + host + ":" + port + uri
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ if debug == True:
+ print "POST %s" % url
+ print json.dumps(data, indent=4, sort_keys=True)
+ r = requests.post(url, data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(USERNAME, PASSWORD))
+ if debug == True:
+ print r.text
+ # Fail loudly on HTTP 4xx/5xx responses.
+ r.raise_for_status()
+
+
+def get_service_functions_uri():
+ '''RESTCONF config path for SFC service-function definitions.'''
+ return "/restconf/config/service-function:service-functions"
+
+
+def get_service_functions_data():
+ return {
+ "service-functions": {
+ "service-function": [
+ {
+ "name": "firewall-72",
+ "ip-mgmt-address": "192.168.50.72",
+ "type": "service-function-type:firewall",
+ "nsh-aware": "true",
+ "sf-data-plane-locator": [
+ {
+ "name": "2",
+ "port": 6633,
+ "ip": "192.168.50.72",
+ "transport": "service-locator:vxlan-gpe",
+ "service-function-forwarder": "SFF1"
+ }
+ ]
+ },
+ {
+ "name": "dpi-74",
+ "ip-mgmt-address": "192.168.50.74",
+ "type": "service-function-type:dpi",
+ "nsh-aware": "true",
+ "sf-data-plane-locator": [
+ {
+ "name": "3",
+ "port": 6633,
+ "ip": "192.168.50.74",
+ "transport": "service-locator:vxlan-gpe",
+ "service-function-forwarder": "SFF2"
+ }
+ ]
+ }
+ ]
+ }
+ }
+
+
+def get_service_function_forwarders_uri():
+ '''RESTCONF config path for SFC service-function-forwarder definitions.'''
+ return "/restconf/config/service-function-forwarder:service-function-forwarders"
+
+
+def get_service_function_forwarders_data():
+ return {
+ "service-function-forwarders": {
+ "service-function-forwarder": [
+ {
+ "name": "SFF1",
+ "service-node": "OVSDB2",
+ "service-function-forwarder-ovs:ovs-bridge": {
+ "bridge-name": "sw2"
+ },
+ "service-function-dictionary": [
+ {
+ "name": "firewall-72",
+ "type": "service-function-type:firewall",
+ "sff-sf-data-plane-locator": {
+ "port": 6633,
+ "ip": "192.168.50.71",
+ "transport": "service-locator:vxlan-gpe"
+ }
+ }
+ ],
+ "sff-data-plane-locator": [
+ {
+ "name": "sfc-tun2",
+ "data-plane-locator": {
+ "transport": "service-locator:vxlan-gpe",
+ "port": 6633,
+ "ip": "192.168.50.71"
+ },
+ "service-function-forwarder-ovs:ovs-options": {
+ "remote-ip": "flow",
+ "dst-port": "6633",
+ "key": "flow",
+ "nsp": "flow",
+ "nsi": "flow",
+ "nshc1": "flow",
+ "nshc2": "flow",
+ "nshc3": "flow",
+ "nshc4": "flow"
+ }
+ }
+ ]
+ },
+ {
+ "name": "SFF2",
+ "service-node": "OVSDB2",
+ "service-function-forwarder-ovs:ovs-bridge": {
+ "bridge-name": "sw4"
+ },
+ "service-function-dictionary": [
+ {
+ "name": "dpi-74",
+ "type": "service-function-type:dpi",
+ "sff-sf-data-plane-locator": {
+ "port": 6633,
+ "ip": "192.168.50.73",
+ "transport": "service-locator:vxlan-gpe"
+ }
+ }
+ ],
+ "sff-data-plane-locator": [
+ {
+ "name": "sfc-tun4",
+ "data-plane-locator": {
+ "transport": "service-locator:vxlan-gpe",
+ "port": 6633,
+ "ip": "192.168.50.73"
+ },
+ "service-function-forwarder-ovs:ovs-options": {
+ "remote-ip": "flow",
+ "dst-port": "6633",
+ "key": "flow",
+ "nsp": "flow",
+ "nsi": "flow",
+ "nshc1": "flow",
+ "nshc2": "flow",
+ "nshc3": "flow",
+ "nshc4": "flow"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ }
+
+
+def get_service_function_chains_uri():
+ '''RESTCONF config path for SFC service-function-chain definitions.'''
+ return "/restconf/config/service-function-chain:service-function-chains/"
+
+
+def get_service_function_chains_data():
+ return {
+ "service-function-chains": {
+ "service-function-chain": [
+ {
+ "name": "SFCGBP",
+ "symmetric": "true",
+ "sfc-service-function": [
+ {
+ "name": "firewall-abstract1",
+ "type": "service-function-type:firewall"
+ },
+ {
+ "name": "dpi-abstract1",
+ "type": "service-function-type:dpi"
+ }
+ ]
+ }
+ ]
+ }
+ }
+
+
+def get_service_function_paths_uri():
+ '''RESTCONF config path for SFC service-function-path definitions.'''
+ return "/restconf/config/service-function-path:service-function-paths/"
+
+
+def get_service_function_paths_data():
+ return {
+ "service-function-paths": {
+ "service-function-path": [
+ {
+ "name": "SFCGBP-Path",
+ "service-chain-name": "SFCGBP",
+ "starting-index": 255,
+ "symmetric": "true"
+
+ }
+ ]
+ }
+ }
+
+
+def get_tenant_data():
+ return {
+ "policy:tenant": [
+ {
+ "id": "f5c7d344-d1c7-4208-8531-2c2693657e12",
+ "l2-flood-domain": [
+ {
+ "id": "393b4a3f-431e-476f-9674-832fb9f5fab9",
+ "parent": "7b796915-adf4-4356-b5ca-de005ac410c1"
+ },
+ {
+ "id": "4ae1198e-0380-427f-8386-28281672eca3",
+ "parent": "7b796915-adf4-4356-b5ca-de005ac410c1"
+ }
+ ],
+ "name": "DockerTenant",
+ "l3-context": [
+ {
+ "id": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "l2-bridge-domain": [
+ {
+ "id": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "parent": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "subnet": [
+ {
+ "id": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+ "virtual-router-ip": "10.0.36.1",
+ "parent": "4ae1198e-0380-427f-8386-28281672eca3",
+ "ip-prefix": "10.0.36.1/24"
+ },
+ {
+ "id": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+ "virtual-router-ip": "10.0.35.1",
+ "parent": "393b4a3f-431e-476f-9674-832fb9f5fab9",
+ "ip-prefix": "10.0.35.1/24"
+ }
+ ],
+ "endpoint-group": [
+ {
+ "id": "e593f05d-96be-47ad-acd5-ba81465680d5",
+ "network-domain": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+ "name": "webservers",
+ "provider-named-selector": [
+ {
+ "name": "e593f05d-96be-47ad-acd5-ba81465680d5-1eaf9a67-a171-42a8-9282-71cf702f61dd-22282cca-9a13-4d0c-a67e-a933ebb0b0ae",
+ "contract": [
+ "22282cca-9a13-4d0c-a67e-a933ebb0b0ae"
+ ]
+ }
+ ]
+ },
+ {
+ "id": "1eaf9a67-a171-42a8-9282-71cf702f61dd",
+ "name": "clients",
+ "network-domain": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+ "consumer-named-selector": [
+ {
+ "name": "e593f05d-96be-47ad-acd5-ba81465680d5-1eaf9a67-a171-42a8-9282-71cf702f61dd-22282cca-9a13-4d0c-a67e-a933ebb0b0ae",
+ "contract": [
+ "22282cca-9a13-4d0c-a67e-a933ebb0b0ae"
+ ]
+ }
+ ]
+ }
+ ],
+ "subject-feature-instances": {
+ "classifier-instance": [
+ {
+ "name": "icmp",
+ "classifier-definition-id": "79c6fdb2-1e1a-4832-af57-c65baf5c2335",
+ "parameter-value": [
+ {
+ "name": "proto",
+ "int-value": 1
+ }
+ ]
+ },
+ {
+ "name": "http-dest",
+ "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
+ "parameter-value": [
+ {
+ "int-value": "6",
+ "name": "proto"
+ },
+ {
+ "int-value": "80",
+ "name": "destport"
+ }
+ ]
+ },
+ {
+ "name": "http-src",
+ "classifier-definition-id": "4250ab32-e8b8-445a-aebb-e1bd2cdd291f",
+ "parameter-value": [
+ {
+ "int-value": "6",
+ "name": "proto"
+ },
+ {
+ "int-value": "80",
+ "name": "sourceport"
+ }
+ ]
+ }
+ ],
+ "action-instance": [
+ {
+ "name": "chain1",
+ "action-definition-id": "3d886be7-059f-4c4f-bbef-0356bea40933",
+ "parameter-value": [
+ {
+ "name": "sfc-chain-name",
+ "string-value": "SFCGBP"
+ }
+ ]
+ },
+ {
+ "name": "allow1",
+ "action-definition-id": "f942e8fd-e957-42b7-bd18-f73d11266d17"
+ }
+ ]
+ },
+ "contract": [
+ {
+ "id": "22282cca-9a13-4d0c-a67e-a933ebb0b0ae",
+ "subject": [
+ {
+ "name": "icmp-subject",
+ "rule": [
+ {
+ "name": "allow-icmp-rule",
+ "order": 0,
+ "classifier-ref": [
+ {
+ "name": "icmp",
+ "instance-name": "icmp"
+ }
+ ],
+ "action-ref": [
+ {
+ "name": "allow1",
+ "order": 0
+ }
+ ]
+ }
+
+ ]
+ },
+ {
+ "name": "http-subject",
+ "rule": [
+ {
+ "name": "http-chain-rule-in",
+ "classifier-ref": [
+ {
+ "name": "http-dest",
+ "instance-name": "http-dest",
+ "direction": "in"
+ }
+ ],
+ "action-ref": [
+ {
+ "name": "chain1",
+ "order": 0
+ }
+ ]
+ },
+ {
+ "name": "http-chain-rule-out",
+ "classifier-ref": [
+ {
+ "name": "http-src",
+ "instance-name": "http-src",
+ "direction": "out"
+ }
+ ],
+ "action-ref": [
+ {
+ "name": "chain1",
+ "order": 0
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "clause": [
+ {
+ "name": "icmp-http-clause",
+ "subject-refs": [
+ "icmp-subject",
+ "http-subject"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+
+def get_action_instance_data(chain):
+ '''Build a GBP action-instance named "use-chain-<chain>" that selects the
+ given SFC chain via the sfc-chain-name parameter.
+
+ NOTE(review): the action-definition-id matches the one used by the
+ "chain1" action in get_tenant_data() -- presumably the fixed GBP chain
+ action definition; verify against the controller.
+ '''
+ return {
+ "name": "use-chain-" + chain,
+ "action-definition-id": "3d886be7-059f-4c4f-bbef-0356bea40933",
+ "parameter-value": [
+ {
+ "name": "sfc-chain-name",
+ "string-value": chain
+ }
+ ]
+ }
+
+
+# Main definition - constants
+
+# =======================
+# MENUS FUNCTIONS
+# =======================
+
+# Main menu
+
+# =======================
+# MAIN PROGRAM
+# =======================
+
+# Main Program
+
+def get_tenant_uri():
+ '''RESTCONF config path for the hard-coded demo tenant (same id as
+ the "DockerTenant" in get_tenant_data()).'''
+ return "/restconf/config/policy:tenants/policy:tenant/f5c7d344-d1c7-4208-8531-2c2693657e12"
+
+
+def get_tunnel_data():
+ return {
+ "opendaylight-inventory:nodes": {
+ "node": [
+ {
+ "id": "openflow:1",
+ "ofoverlay:tunnel": [
+ {
+ "tunnel-type": "overlay:tunnel-type-vxlan-gpe",
+ "node-connector-id": "openflow:1:1",
+ "ip": "192.168.50.70",
+ "port": 6633
+ },
+ {
+ "tunnel-type": "overlay:tunnel-type-vxlan",
+ "node-connector-id": "openflow:1:2",
+ "ip": "192.168.50.70",
+ "port": 4789
+ }
+ ]
+ },
+ {
+ "id": "openflow:6",
+ "ofoverlay:tunnel": [
+ {
+ "tunnel-type": "overlay:tunnel-type-vxlan-gpe",
+ "node-connector-id": "openflow:6:1",
+ "ip": "192.168.50.75",
+ "port": 6633
+ },
+ {
+ "tunnel-type": "overlay:tunnel-type-vxlan",
+ "node-connector-id": "openflow:6:2",
+ "ip": "192.168.50.75",
+ "port": 4789
+ }
+ ]
+ }
+ ]
+ }
+ }
+
+
+def get_tunnel_uri():
+ '''RESTCONF config path for opendaylight-inventory nodes (tunnel config target).'''
+ return "/restconf/config/opendaylight-inventory:nodes"
+
+
+def get_endpoint_data():
+ return [
+ {
+ "input": {
+
+ "endpoint-group": "e593f05d-96be-47ad-acd5-ba81465680d5",
+
+ "network-containment": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:36:02",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.36.2",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h36_2",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+ "endpoint-group": "1eaf9a67-a171-42a8-9282-71cf702f61dd",
+ "network-containment": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:35:02",
+ "l3-address": [
+ {
+ "ip-address": "10.0.35.2",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h35_2",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "1eaf9a67-a171-42a8-9282-71cf702f61dd",
+
+ "network-containment": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:35:03",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.35.3",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h35_3",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "e593f05d-96be-47ad-acd5-ba81465680d5",
+
+ "network-containment": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:36:03",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.36.3",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h36_3",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "e593f05d-96be-47ad-acd5-ba81465680d5",
+
+ "network-containment": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:36:04",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.36.4",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h36_4",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "1eaf9a67-a171-42a8-9282-71cf702f61dd",
+
+ "network-containment": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:35:04",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.35.4",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h35_4",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "1eaf9a67-a171-42a8-9282-71cf702f61dd",
+
+ "network-containment": "7f43a456-2c99-497b-9ecf-7169be0163b9",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:35:05",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.35.5",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h35_5",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ },
+ {
+ "input": {
+
+ "endpoint-group": "e593f05d-96be-47ad-acd5-ba81465680d5",
+
+ "network-containment": "49850b5a-684d-4cc0-aafe-95d25c9a4b97",
+
+ "l2-context": "7b796915-adf4-4356-b5ca-de005ac410c1",
+ "mac-address": "00:00:00:00:36:05",
+
+ "l3-address": [
+ {
+ "ip-address": "10.0.36.5",
+ "l3-context": "cbe0cc07-b8ff-451d-8171-9eef002a8e80"
+ }
+ ],
+ "port-name": "vethl-h36_5",
+ "tenant": "f5c7d344-d1c7-4208-8531-2c2693657e12"
+ }
+ }]
+
+
+def get_endpoint_uri():
+ '''RESTCONF operations path for the GBP register-endpoint RPC.'''
+ return "/restconf/operations/endpoint:register-endpoint"
+
+
+if __name__ == "__main__":
+ # Launch main menu
+
+
+ # Usage: <script> CONTROLLER-IP EPG1-NAME EPG2-NAME CHAIN-NAME
+ # Rewrites the matching tenant's IPv4 contract "Allow" actions to invoke
+ # the named SFC chain, then PUTs the whole tenants document back.
+ # Some sensible defaults
+ # controller=os.environ.get('ODL')
+ # NOTE(review): sys.argv is indexed without a length check -- missing
+ # arguments raise IndexError here, so the "controller == None" guard
+ # below can never fire.
+ controller = sys.argv[1]
+ epg1 = sys.argv[2]
+ epg2 = sys.argv[3]
+ epgs = [epg1, epg2]
+ chainName = sys.argv[4]
+ actionName = "use-chain-" + chainName
+ if controller == None:
+ sys.exit("No controller set.")
+
+ # Fetch all configured GBP tenants from the controller.
+ tenants = get(controller, DEFAULT_PORT, CONF_TENANT)
+
+ # NOTE(review): correctTenant is never reset to False per tenant, so once
+ # a match is found every subsequent tenant is also patched -- confirm
+ # whether that is intended.
+ correctTenant = False
+ for tenant in tenants['tenants']['tenant']:
+ #print "Processing", tenant['id'] # debug
+
+ # A tenant "matches" if any of its endpoint-groups carries one of the
+ # EPG names given on the command line.
+ for epg in tenant['policy']['endpoint-group']:
+ if 'name' in epg and (epg['name'] in epgs):
+ #print epg['name'] # debug
+ correctTenant = True
+ break
+ if correctTenant:
+ print "Found correct tenant", tenant['id'] # debug
+ # print tenant['subject-feature-instances']['action-instance']
+ # Append the chain action-instance (skip tenants without the keys).
+ if 'subject-feature-instances' not in tenant: continue
+ if 'action-instance' not in tenant['subject-feature-instances']: continue
+ tenant['subject-feature-instances']['action-instance'].append(get_action_instance_data(chainName))
+ #print tenant['subject-feature-instances']['action-instance']
+ # Swap the first rule's "Allow" action for the chain action on every
+ # contract whose description names one of our EPGs and whose first
+ # subject is an IPv4 subject.
+ for contract in tenant['contract']:
+ if 'description' in contract.keys():
+ for epg in epgs:
+ if epg in contract['description'] and "IPv4" in contract['subject'][0]['name']:
+ #print "EPG:", epg, " Description:", contract['description'], " Subject: ", contract['subject'][0]['name']
+
+ if contract['subject'][0]['rule'][0]['action-ref'][0]['name'] == "Allow":
+ contract['subject'][0]['rule'][0]['action-ref'][0]['name']=actionName
+ #print contract['subject'][0]['rule'][0]['action-ref']
+
+ # PUT the whole (mutated) tenants document back to the controller.
+ print "sending tenant"
+ put(controller, DEFAULT_PORT, CONF_TENANT, tenants,False)
--- /dev/null
+[[local|localrc]]
+#enable_plugin networking-odl https://github.com/stackforge/networking-odl
+enable_plugin networking-odl https://github.com/flavio-fernandes/networking-odl lithiumkilo
+
+LOGFILE=stack.sh.log
+LOG_COLOR=False
+SCREEN_LOGDIR=/opt/stack/data/log
+#RECLONE=yes
+RECLONE=no
+
+disable_all_services
+enable_service n-cpu n-novnc n-cauth
+
+HOST_IP=192.168.50.21
+HOST_NAME=devstack-compute-1
+SERVICE_HOST=192.168.50.20
+SERVICE_HOST_NAME=devstack-control
+
+VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP
+VNCSERVER_LISTEN=0.0.0.0
+
+ODL_MODE=compute
+ODL_MGR_IP=$ODL
+ODL_PORT=8080
+ENABLE_TENANT_TUNNELS=True
+Q_ML2_TENANT_NETWORK_TYPE=vxlan
+
+FLOATING_RANGE=192.168.111.0/24
+FIXED_RANGE="90.1.1.0/24"
+PUBLIC_NETWORK_GATEWAY=192.168.111.254
+
+PUBLIC_INTERFACE=eth2
+PUBLIC_BRIDGE=br-int
+
+## Neutron options
+Q_USE_SECGROUP=True
+ENABLE_TENANT_VLANS=True
+TENANT_VLAN_RANGE=3001:4000
+PHYSICAL_NETWORK=dr-external
+OVS_PHYSICAL_BRIDGE=br-int
+
+ODL_PROVIDER_MAPPINGS=$PHYSICAL_NETWORK:$PUBLIC_INTERFACE
+
+
+# NOTE: Set the database type
+DATABASE_TYPE=mysql
+KEYSTONE_CATALOG_BACKEND=sql
+
+Q_HOST=$SERVICE_HOST
+MYSQL_HOST=$SERVICE_HOST
+RABBIT_HOST=$SERVICE_HOST
+GLANCE_HOSTPORT=$SERVICE_HOST:9292
+KEYSTONE_AUTH_HOST=$SERVICE_HOST
+KEYSTONE_SERVICE_HOST=$SERVICE_HOST
+
+MYSQL_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+#QPID_PASSWORD=rabbit
+SERVICE_TOKEN=service
+SERVICE_PASSWORD=admin
+ADMIN_PASSWORD=admin
+
+BRANCH=stable/kilo
+GLANCE_BRANCH=$BRANCH
+HORIZON_BRANCH=$BRANCH
+KEYSTONE_BRANCH=$BRANCH
+NOVA_BRANCH=$BRANCH
+NEUTRON_BRANCH=$BRANCH
+SWIFT_BRANCH=$BRANCH
+##CLIFF_BRANCH=$BRANCH
+##TEMPEST_BRANCH=$BRANCH
+CINDER_BRANCH=$BRANCH
+HEAT_BRANCH=$BRANCH
+TROVE_BRANCH=$BRANCH
+CEILOMETER_BRANCH=$BRANCH
+
+
+ODL_L3=True
+#Q_ML2_L3_PLUGIN=networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
+## For L3
+[[post-config|/etc/neutron/l3_agent.ini]]
+[DEFAULT]
+interface_driver = neutron.agent.linux.interface.NullDriver
+
+
+[[post-config|$NOVA_CONF]]
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+quota_ram = 2048000
+
+[[post-config|$NOVA_CONF]]
+[DEFAULT]
+vnc_enabled=True
+novncproxy_base_url=http://192.168.50.20:6080/vnc_auto.html
+vncserver_listen=0.0.0.0
+vncserver_proxyclient_address=192.168.50.21
+skip_isolated_core_check=true
+max_cores=128
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+scheduler_default_filters = AllHostsFilter
+[quota]
+skip_isolated_core_check=true
+max_cores=128
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$CINDER_CONF]]
+[DEFAULT]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[quota]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$NEUTRON_CONF]]
+[DEFAULT]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[quota]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$GLANCE_API_CONF]]
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+
--- /dev/null
+[[local|localrc]]
+
+
+enable_plugin tacker https://github.com/trozet/tacker SFC_refactor
+enable_plugin networking-odl https://github.com/openstack/networking-odl stable/kilo
+#enable_plugin networking-odl https://github.com/flavio-fernandes/networking-odl lithiumkilo
+
+
+#IMAGE_URLS="http://uec-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img,http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img,http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz"
+## Tacker
+TACKERCLIENT_REPO=https://github.com/trozet/python-tackerclient.git
+TACKERCLIENT_BRANCH=SFC_refactor
+TACKERHORIZON_REPO=https://github.com/trozet/tacker-horizon.git
+BR_MGMT=br-int
+## end Tacker
+
+LOGFILE=stack.sh.log
+SCREEN_LOGDIR=/opt/stack/data/log
+LOG_COLOR=False
+RECLONE=yes
+#RECLONE=no
+
+enable_service n-novnc
+##enable_service n-cauth
+enable_service odl-compute odl-neutron
+
+enable_service q-svc
+enable_service q-dhcp
+enable_service q-meta
+enable_service n-cauth
+enable_service tacker
+
+disable_service q-vpn
+disable_service q-metering
+disable_service q-lbaas
+disable_service q-lbaasv2
+disable_service q-fwaas
+disable_service swift
+disable_service cinder
+disable_service n-net
+disable_service tempest
+
+HOST_IP=192.168.50.20
+HOST_NAME=devstack-control
+SERVICE_HOST=$HOST_IP
+SERVICE_HOST_NAME=$HOST_NAME
+
+NEUTRON_CREATE_INITIAL_NETWORKS=False
+Q_ML2_TENANT_NETWORK_TYPE=vxlan
+Q_USE_SECGROUP=True
+
+## Tacker
+PIP_USE_MIRRORS=False
+USE_GET_PIP=1
+# eTacker
+
+ODL_MODE=externalodl
+
+ODL_MGR_IP=$ODL
+ODL_PORT=8080
+
+ENABLE_TENANT_TUNNELS=True
+
+VNCSERVER_PROXYCLIENT_ADDRESS=${HOST_IP}
+VNCSERVER_LISTEN=0.0.0.0
+
+# un commented for further tacker testing #Commented out for Tacker testing
+
+## External networking
+FLOATING_RANGE=192.168.111.0/24
+FIXED_RANGE="90.1.1.0/24"
+PUBLIC_NETWORK_GATEWAY=192.168.111.254
+
+PUBLIC_INTERFACE=eth2
+PUBLIC_BRIDGE=br-int
+
+## Neutron options
+ENABLE_TENANT_VLANS=True
+TENANT_VLAN_RANGE=3001:4000
+PHYSICAL_NETWORK=dr-external
+OVS_PHYSICAL_BRIDGE=br-int
+ODL_PROVIDER_MAPPINGS=$PHYSICAL_NETWORK:$PUBLIC_INTERFACE
+
+# end Tacker testing block
+MYSQL_HOST=$SERVICE_HOST
+RABBIT_HOST=$SERVICE_HOST
+GLANCE_HOSTPORT=$SERVICE_HOST:9292
+KEYSTONE_AUTH_HOST=$SERVICE_HOST
+KEYSTONE_SERVICE_HOST=$SERVICE_HOST
+
+MYSQL_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+SERVICE_TOKEN=service
+SERVICE_PASSWORD=admin
+ADMIN_PASSWORD=admin
+
+BRANCH=stable/kilo
+GLANCE_BRANCH=$BRANCH
+HORIZON_BRANCH=$BRANCH
+KEYSTONE_BRANCH=$BRANCH
+NOVA_BRANCH=$BRANCH
+NEUTRON_BRANCH=$BRANCH
+SWIFT_BRANCH=$BRANCH
+CINDER_BRANCH=$BRANCH
+HEAT_BRANCH=$BRANCH
+TROVE_BRANCH=$BRANCH
+CEILOMETER_BRANCH=$BRANCH
+
+ODL_L3=True
+## For L3
+[[post-config|/etc/neutron/l3_agent.ini]]
+[DEFAULT]
+interface_driver = neutron.agent.linux.interface.NullDriver
+
+[[post-config|/etc/neutron/neutron.conf]]
+[DEFAULT]
+service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
+
+[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
+[agent]
+minimize_polling=True
+
+[[post-config|/etc/tacker/tacker.conf]]
+[DEFAULT]
+service_plugins = tacker.vm.plugin.VNFMPlugin,tacker.sfc.plugin.SFCPlugin,tacker.sfc_classifier.plugin.SFCCPlugin
+[servicevm]
+infra_driver = heat
+[servicevm_heat]
+stack_retries = 10
+stack_retry_wait = 30
+[sfc]
+infra_driver = opendaylight
+[sfc_opendaylight]
+ip = $ODL_MGR_IP
+port = $ODL_PORT
+username = admin
+password = admin
+
+[[post-config|$NOVA_CONF]]
+[DEFAULT]
+skip_isolated_core_check=true
+max_cores=128
+quota_ram = 2048000
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+#scheduler_default_filters = AllHostsFilter
+[quota]
+skip_isolated_core_check=true
+max_cores=128
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$CINDER_CONF]]
+[DEFAULT]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[quota]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$NEUTRON_CONF]]
+[DEFAULT]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[quota]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$GLANCE_API_CONF]]
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+
--- /dev/null
+#!/usr/bin/env bash
+
+TAP=$1
+IP=$2
+MAC=$3
+TDEST=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
+sudo ip route add $2/32 dev $1
+sudo arp -i $1 -s $2 $3
+
+echo "Flow mod TBD"
+sudo ovs-ofctl add-flow br-int "table=0,ip,nw_dst=$TDEST,actions=output:4" -OOpenFlow13
+resetcontroller.sh
--- /dev/null
+[[local|localrc]]
+
+enable_plugin tacker https://github.com/trozet/tacker SFC_refactor
+enable_plugin networking-odl https://github.com/openstack/networking-odl stable/kilo
+#enable_plugin tacker https://github.com/trozet/tacker SFC_refactor
+#enable_plugin networking-odl https://github.com/openstack/networking-odl stable/kilo
+#enable_plugin networking-odl https://github.com/flavio-fernandes/networking-odl lithiumkilo
+
+
+#IMAGE_URLS="http://uec-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img,http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img,http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz"
+## Tacker
+TACKERCLIENT_REPO=https://github.com/trozet/python-tackerclient.git
+TACKERCLIENT_BRANCH=SFC_refactor
+TACKERHORIZON_REPO=https://github.com/trozet/tacker-horizon.git
+BR_MGMT=br-int
+## end Tacker
+
+LOGFILE=stack.sh.log
+SCREEN_LOGDIR=/opt/stack/data/log
+LOG_COLOR=False
+#RECLONE=yes
+RECLONE=no
+
+enable_service n-novnc
+enable_service n-cauth
+enable_service odl-compute odl-neutron
+
+##enable_service q-svc
+##enable_service q-dhcp
+##enable_service q-meta
+##enable_service n-cauth
+enable_service tacker
+
+##disable_service q-vpn
+##disable_service q-metering
+##disable_service q-lbaas
+##disable_service q-lbaasv2
+##disable_service q-fwaas
+##disable_service swift
+##disable_service cinder
+##disable_service n-net
+##disable_service tempest
+
+HOST_IP=192.168.50.20
+HOST_NAME=devstack-control
+SERVICE_HOST=$HOST_IP
+SERVICE_HOST_NAME=$HOST_NAME
+
+NEUTRON_CREATE_INITIAL_NETWORKS=False
+Q_ML2_TENANT_NETWORK_TYPE=vxlan
+Q_USE_SECGROUP=True
+
+## Tacker
+PIP_USE_MIRRORS=False
+USE_GET_PIP=1
+# eTacker
+
+ODL_MODE=externalodl
+
+ODL_MGR_IP=$ODL
+ODL_PORT=8080
+
+ENABLE_TENANT_TUNNELS=True
+
+VNCSERVER_PROXYCLIENT_ADDRESS=${HOST_IP}
+VNCSERVER_LISTEN=0.0.0.0
+
+# un commented for further tacker testing #Commented out for Tacker testing
+
+## External networking
+FLOATING_RANGE=192.168.111.0/24
+FIXED_RANGE="90.1.1.0/24"
+PUBLIC_NETWORK_GATEWAY=192.168.111.254
+
+PUBLIC_INTERFACE=eth2
+PUBLIC_BRIDGE=br-int
+
+## Neutron options
+ENABLE_TENANT_VLANS=True
+TENANT_VLAN_RANGE=3001:4000
+PHYSICAL_NETWORK=dr-external
+OVS_PHYSICAL_BRIDGE=br-int
+ODL_PROVIDER_MAPPINGS=$PHYSICAL_NETWORK:$PUBLIC_INTERFACE
+
+# end Tacker testing block
+MYSQL_HOST=$SERVICE_HOST
+RABBIT_HOST=$SERVICE_HOST
+GLANCE_HOSTPORT=$SERVICE_HOST:9292
+KEYSTONE_AUTH_HOST=$SERVICE_HOST
+KEYSTONE_SERVICE_HOST=$SERVICE_HOST
+
+MYSQL_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+SERVICE_TOKEN=service
+SERVICE_PASSWORD=admin
+ADMIN_PASSWORD=admin
+
+BRANCH=stable/kilo
+GLANCE_BRANCH=$BRANCH
+HORIZON_BRANCH=$BRANCH
+KEYSTONE_BRANCH=$BRANCH
+NOVA_BRANCH=$BRANCH
+NEUTRON_BRANCH=$BRANCH
+SWIFT_BRANCH=$BRANCH
+CINDER_BRANCH=$BRANCH
+HEAT_BRANCH=$BRANCH
+TROVE_BRANCH=$BRANCH
+CEILOMETER_BRANCH=$BRANCH
+
+ODL_L3=True
+## For L3
+[[post-config|/etc/neutron/l3_agent.ini]]
+[DEFAULT]
+interface_driver = neutron.agent.linux.interface.NullDriver
+
+[[post-config|/etc/neutron/neutron.conf]]
+[DEFAULT]
+service_plugins = networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin
+
+[[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]]
+[agent]
+minimize_polling=True
+
+[[post-config|/etc/tacker/tacker.conf]]
+[DEFAULT]
+service_plugins = tacker.vm.plugin.VNFMPlugin,tacker.sfc.plugin.SFCPlugin,tacker.sfc_classifier.plugin.SFCCPlugin
+[servicevm]
+infra_driver = heat
+[servicevm_heat]
+stack_retries = 10
+stack_retry_wait = 30
+[sfc]
+infra_driver = opendaylight
+[sfc_opendaylight]
+ip = $ODL_MGR_IP
+port = $ODL_PORT
+username = admin
+password = admin
+
+[[post-config|$NOVA_CONF]]
+[DEFAULT]
+skip_isolated_core_check=true
+max_cores=128
+quota_ram = 2048000
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+#scheduler_default_filters = AllHostsFilter
+[quota]
+skip_isolated_core_check=true
+max_cores=128
+cpu_allocation_ratio=16.0
+quota_cores=128
+quota_instances=500
+quota_floating_ips=1000
+quota_metadata_items=300
+quota_security_group_rules=200
+quota_security_groups=200
+quota_volumes=100
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$CINDER_CONF]]
+[DEFAULT]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[quota]
+quota_gigabytes=-1
+quota_snapshots=-1
+quota_volumes=-1
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$NEUTRON_CONF]]
+[DEFAULT]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[quota]
+quota_network = 100
+quota_subnet = 100
+quota_port = 500
+quota_router = 100
+quota_floatingip = 5000
+quota_security_group_rules=200
+quota_security_groups=200
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+[[post-config|$GLANCE_API_CONF]]
+[oslo_messaging_rabbit]
+heartbeat_timeout_threshold = 0
+
--- /dev/null
+#!/usr/bin/env bash
+# sfc01.sh — stage 1 of the SFC demo setup: import the SF image, create
+# host aggregates/availability zones, security groups, two tenant networks
+# and a router, then boot the initial client/web instances.
+# Guarded by a lock file so it only runs once per VM.
+
+if [ -f "/home/vagrant/sfc01.lock" ]; then
+ echo "You have already run sfc01"
+ exit
+fi
+echo "writing lock file /home/vagrant/sfc01.lock"
+touch /home/vagrant/sfc01.lock
+
+#echo "Setting GBP table offset to 20"
+# NOTE(review): the commented-out curl below uses "-t PUT"; curl's flag for
+# the request method is "-X PUT" ("-t" is not a curl option) — fix before
+# re-enabling. The leading "sh" also looks spurious.
+#sh curl --user admin:admin --header "Content-Type:application/json" -t PUT --data '{ "of-overlay-config": { "gbp-ofoverlay-table-offset": "19" }}' http://$ODL:8181/restconf/config/ofoverlay:of-overlay-config
+
+echo "Importing image for SF"
+/vagrant/devstack-scripts/importSF.sh
+
+echo "Making aggregates/availability zones under admin/admin:"
+source openrc admin admin
+
+nova hypervisor-list
+# NOTE(review): aggregate IDs 1 and 2 below are assumed to be assigned to the
+# two aggregates created here — fragile if any aggregates already exist.
+nova aggregate-create control az-control
+nova aggregate-add-host 1 devstack-control
+nova aggregate-create compute az-compute
+nova aggregate-add-host 2 devstack-compute-1
+
+#echo "Adding key pair cloudkey to admin and service tenants"
+#nova keypair-add --pub-key cloud.key.pub cloudkey
+#source openrc heat service
+#nova keypair-add --pub-key cloud.key.pub cloudkey
+
+
+echo "Making infrastructure (SecGrps, Networks, Router) for GBP managed workloads under admin/admin:"
+source openrc admin admin
+
+# Allow-all IPv4 security group for the client-side instances.
+neutron security-group-create client_sg
+neutron security-group-rule-create client_sg --direction ingress --ethertype IPv4
+neutron security-group-rule-create client_sg --direction egress --ethertype IPv4
+
+# Allow-all IPv4 security group for the web-side instances.
+neutron security-group-create web_sg
+neutron security-group-rule-create web_sg --direction ingress --ethertype IPv4
+neutron security-group-rule-create web_sg --direction egress --ethertype IPv4
+
+neutron net-create net1
+neutron subnet-create net1 10.1.1.0/24 --name sub1 --gateway 10.1.1.1 --dns-nameservers list=true 8.8.4.4 8.8.8.8
+
+neutron net-create net2
+neutron subnet-create net2 20.1.1.0/24 --name sub2 --gateway 20.1.1.1 --dns-nameservers list=true 8.8.4.4 8.8.8.8
+
+# Router connecting the two tenant subnets.
+neutron router-create r1
+neutron router-interface-add r1 sub1
+neutron router-interface-add r1 sub2
+
+# Boot demo instances; presumably args are <network> <security group> <index>.
+# NOTE(review): invoked without a path — assumes novaboot-control.sh is on
+# PATH; sibling scripts here use absolute /vagrant/... paths — confirm.
+novaboot-control.sh net1 client_sg 1
+novaboot-control.sh net2 web_sg 2
+novaboot-control.sh net1 client_sg 2
+
+
+echo "control:"
+nova list --host devstack-control
+echo "compute:"
+nova list --host devstack-compute-1
+
+
--- /dev/null
+#!/usr/bin/env bash
+# sfc02.sh — stage 2 of the SFC demo: create the in-band SFC management
+# network, register the test VNFD with Tacker, and deploy the test VNF.
+# Guarded by a lock file so it only runs once per VM.
+
+if [ -f "/home/vagrant/sfc02.lock" ]; then
+ echo "You have already run sfc02"
+ exit
+fi
+echo "writing lock file /home/vagrant/sfc02.lock"
+touch /home/vagrant/sfc02.lock
+
+echo "Making OOB management network and security groups for SF in heat/service:"
+source openrc heat service
+#neutron net-create sf_mgmt
+#neutron subnet-create sf_mgmt 30.1.1.0/24 --name sf_mgmt_sub --gateway 30.1.1.1
+
+#neutron security-group-create sf_mgmt
+#neutron security-group-rule-create sf_mgmt --direction ingress --ethertype IPv4
+#neutron security-group-rule-create sf_mgmt --direction egress --ethertype IPv4
+
+echo "Making SFC net_mgmt for inband SFC traffic:"
+neutron net-create net_mgmt #--provider:network_type=flat --provider:physical_network dr-external --router:external
+neutron subnet-create net_mgmt 11.0.0.0/24
+
+echo "Import VNFD for test-VNF:"
+tacker vnfd-create --vnfd-file /vagrant/devstack-scripts/sfc-random/test-vnfd.yaml
+
+echo "Deploy VNFs:"
+tacker vnf-create --name testVNF1 --vnfd-name test-vnfd
+#tacker vnf-create --name testVNF2 --vnfd-name test-vnfd
+
+
+
+
+# Manual follow-up steps are printed rather than automated: the VNF must
+# reach ACTIVE before sfc03.sh, and the vxlan ports must be removed by hand.
+echo "Wait a few minutes and then check VNF status is ACTIVE (tacker vnf-list) then execute the following commands on devstack-compute-1 BEFORE running sfc03.sh:
+
+sudo ovs-vsctl show
+sudo ovs-vsctl del-port vxlangpe-br-int
+sudo ovs-vsctl del-port vxlan-br-int
+sudo ovs-vsctl show
+
+- remove VNF tenant from DataStore, it borks PolEnf"
+
--- /dev/null
+#!/usr/bin/env bash
+# sfc03.sh — stage 3 of the SFC demo: create the Tacker service function
+# chain "mychain" through testVNF1 and show its status.
+# Guarded by a lock file so it only runs once per VM.
+
+if [ -f "/home/vagrant/sfc03.lock" ]; then
+ echo "You have already run sfc03"
+ exit
+fi
+echo "writing lock file /home/vagrant/sfc03.lock"
+touch /home/vagrant/sfc03.lock
+
+echo "Creating chain 'mychain' under heat/service:"
+source openrc heat service
+
+tacker sfc-create --name mychain --chain testVNF1 --symmetrical True
+tacker sfc-show mychain
+
+
+echo "Can verify flows by 'dumpflows.sh | grep nsp' on devstack-compute-1 and lack of them on devstack-control. Then run sfc04 to add chain to policy."
--- /dev/null
+#!/usr/bin/env bash
+# sfc04.sh — stage 4 of the SFC demo: attach the chain "mychain" as an
+# action on the client_sg -> web_sg policy via chain.py against the ODL
+# controller ($ODL must be set in the environment).
+# Guarded by a lock file so it only runs once per VM.
+
+if [ -f "/home/vagrant/sfc04.lock" ]; then
+ echo "You have already run sfc04"
+ exit
+fi
+echo "writing lock file /home/vagrant/sfc04.lock"
+touch /home/vagrant/sfc04.lock
+
+# Args: <ODL controller IP> <source secgroup> <dest secgroup> <chain name>
+python /vagrant/devstack-scripts/sfc/chain.py $ODL client_sg web_sg mychain
+
+echo "Chain action added to policy.
+
+kernelmods.sh
+"
--- /dev/null
+# Convenience status check: list instances on each hypervisor host.
+# NOTE(review): no shebang — presumably sourced or run via "bash <file>"; confirm.
+echo "nova list --host devstack-control"
+nova list --host devstack-control
+echo "nova list --host devstack-compute-1"
+nova list --host devstack-compute-1