class FlowConfigBlaster(object):
- putheaders = {'content-type': 'application/json'}
- getheaders = {'Accept': 'application/json'}
+ putheaders = {"content-type": "application/json"}
+ getheaders = {"Accept": "application/json"}
- FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+ FLWURL = (
+ "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+ )
TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"
- INVURL = 'restconf/operational/opendaylight-inventory:nodes'
+ INVURL = "restconf/operational/opendaylight-inventory:nodes"
TIMEOUT = 10
flows = {}
# The "built-in" flow template
flow_mode_template = {
- u'flow': [
+ u"flow": [
{
- u'hard-timeout': 65000,
- u'idle-timeout': 65000,
- u'cookie_mask': 4294967295,
- u'flow-name': u'FLOW-NAME-TEMPLATE',
- u'priority': 2,
- u'strict': False,
- u'cookie': 0,
- u'table_id': 0,
- u'installHw': False,
- u'id': u'FLOW-ID-TEMPLATE',
- u'match': {
- u'ipv4-destination': u'0.0.0.0/32',
- u'ethernet-match': {
- u'ethernet-type': {
- u'type': 2048
- }
- }
+ u"hard-timeout": 65000,
+ u"idle-timeout": 65000,
+ u"cookie_mask": 4294967295,
+ u"flow-name": u"FLOW-NAME-TEMPLATE",
+ u"priority": 2,
+ u"strict": False,
+ u"cookie": 0,
+ u"table_id": 0,
+ u"installHw": False,
+ u"id": u"FLOW-ID-TEMPLATE",
+ u"match": {
+ u"ipv4-destination": u"0.0.0.0/32",
+ u"ethernet-match": {u"ethernet-type": {u"type": 2048}},
},
- u'instructions': {
- u'instruction': [
+ u"instructions": {
+ u"instruction": [
{
- u'order': 0,
- u'apply-actions': {
- u'action': [
- {
- u'drop-action': {},
- u'order': 0
- }
- ]
- }
+ u"order": 0,
+ u"apply-actions": {
+ u"action": [{u"drop-action": {}, u"order": 0}]
+ },
}
]
- }
+ },
}
]
}
def get_total_flows(self):
return self.total_flows.value
- def __init__(self, host, port, ncycles, nthreads, fpr, nnodes, nflows, startflow, auth, flow_mod_template=None):
+ def __init__(
+ self,
+ host,
+ port,
+ ncycles,
+ nthreads,
+ fpr,
+ nnodes,
+ nflows,
+ startflow,
+ auth,
+ flow_mod_template=None,
+ ):
self.host = host
self.port = port
self.ncycles = ncycles
if flow_mod_template:
self.flow_mode_template = flow_mod_template
- self.post_url_template = 'http://%s:' + self.port + '/' + self.TBLURL
- self.del_url_template = 'http://%s:' + self.port + '/' + self.FLWURL
+ self.post_url_template = "http://%s:" + self.port + "/" + self.TBLURL
+ self.del_url_template = "http://%s:" + self.port + "/" + self.FLWURL
self.stats = self.FcbStats()
self.total_ok_flows = 0
self.total_ok_rqsts = 0
- self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
+ self.ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")) + startflow)
self.print_lock = threading.Lock()
self.cond = threading.Condition()
"""
hosts = self.host.split(",")
host = hosts[0]
- inventory_url = 'http://' + host + ":" + self.port + '/' + self.INVURL
+ inventory_url = "http://" + host + ":" + self.port + "/" + self.INVURL
nodes = self.nnodes
if not self.auth:
- r = session.get(inventory_url, headers=self.getheaders, stream=False, timeout=self.TIMEOUT)
+ r = session.get(
+ inventory_url,
+ headers=self.getheaders,
+ stream=False,
+ timeout=self.TIMEOUT,
+ )
else:
- r = session.get(inventory_url, headers=self.getheaders, stream=False, auth=('admin', 'admin'),
- timeout=self.TIMEOUT)
+ r = session.get(
+ inventory_url,
+ headers=self.getheaders,
+ stream=False,
+ auth=("admin", "admin"),
+ timeout=self.TIMEOUT,
+ )
if r.status_code == 200:
try:
- inv = json.loads(r.content)['nodes']['node']
+ inv = json.loads(r.content)["nodes"]["node"]
nn = 0
for n in range(len(inv)):
- if re.search('openflow', inv[n]['id']) is not None:
+ if re.search("openflow", inv[n]["id"]) is not None:
nn += 1
if nn != 0:
nodes = nn
            Returns: The flow that has been created from the template
"""
- flow = copy.deepcopy(self.flow_mode_template['flow'][0])
- flow['cookie'] = flow_id
- flow['flow-name'] = self.create_flow_name(flow_id)
- flow['id'] = str(flow_id)
- flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
+ flow = copy.deepcopy(self.flow_mode_template["flow"][0])
+ flow["cookie"] = flow_id
+ flow["flow-name"] = self.create_flow_name(flow_id)
+ flow["id"] = str(flow_id)
+ flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ipaddr))
return flow
def post_flows(self, session, node, flow_list, flow_count):
flow_url = self.assemble_post_url(host, node)
if not self.auth:
- r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
+ r = session.post(
+ flow_url,
+ data=flow_data,
+ headers=self.putheaders,
+ stream=False,
+ timeout=self.TIMEOUT,
+ )
else:
- r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'),
- timeout=self.TIMEOUT)
+ r = session.post(
+ flow_url,
+ data=flow_data,
+ headers=self.putheaders,
+ stream=False,
+ auth=("admin", "admin"),
+ timeout=self.TIMEOUT,
+ )
return r.status_code
:return: string containing plain json
"""
fmod = dict(self.flow_mode_template)
- fmod['flow'] = flow_list
+ fmod["flow"] = flow_list
flow_data = json.dumps(fmod)
return flow_data
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print(' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))
+ print(
+ " Thread %d:\n Adding %d flows on %d nodes"
+ % (tid, self.nflows, n_nodes)
+ )
nflows = 0
nb_actions = []
node_id = randrange(1, n_nodes + 1)
flow_list = []
for i in range(self.fpr):
- flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
+ flow_id = (
+ tid * (self.ncycles * self.nflows)
+ + nflows
+ + start_flow_id
+ + self.startflow
+ )
self.flows[tid][flow_id] = node_id
- flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment(), node_id))
+ flow_list.append(
+ self.create_flow_from_template(
+ flow_id, self.ip_addr.increment(), node_id
+ )
+ )
nflows += 1
if nflows >= self.nflows:
break
rqst_stats[sts] = 1
flow_stats[sts] = len(nb_action[2])
- ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
+ ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+ rqst_stats, flow_stats, t.secs
+ )
with self.print_lock:
- print('\n Thread %d results (ADD): ' % tid)
- print(' Elapsed time: %.2fs,' % t.secs)
- print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
- print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
- print(' Stats ({Requests}, {Flows}): ')
- print(rqst_stats,)
+ print("\n Thread %d results (ADD): " % tid)
+ print(" Elapsed time: %.2fs," % t.secs)
+ print(" Requests/s: %.2f OK, %.2f Total" % (ok_rps, total_rps))
+ print(" Flows/s: %.2f OK, %.2f Total" % (ok_fps, total_fps))
+ print(" Stats ({Requests}, {Flows}): ")
+ print(rqst_stats)
print(flow_stats)
self.threads_done += 1
if not self.auth:
r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
else:
- r = session.delete(flow_url, headers=self.getheaders, auth=('admin', 'admin'), timeout=self.TIMEOUT)
+ r = session.delete(
+ flow_url,
+ headers=self.getheaders,
+ auth=("admin", "admin"),
+ timeout=self.TIMEOUT,
+ )
return r.status_code
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))
+ print(
+ "Thread %d: Deleting %d flows on %d nodes" % (tid, self.nflows, n_nodes)
+ )
with Timer() as t:
for flow in range(self.nflows):
- flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
+ flow_id = (
+ tid * (self.ncycles * self.nflows)
+ + flow
+ + start_flow
+ + self.startflow
+ )
sts = self.delete_flow(s, self.flows[tid][flow_id], flow_id, flow)
try:
rqst_stats[sts] += 1
except KeyError:
rqst_stats[sts] = 1
- ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
+ ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+ rqst_stats, rqst_stats, t.secs
+ )
with self.print_lock:
- print('\n Thread %d results (DELETE): ' % tid)
- print(' Elapsed time: %.2fs,' % t.secs)
- print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
- print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
- print(' Stats ({Requests})',)
+ print("\n Thread %d results (DELETE): " % tid)
+ print(" Elapsed time: %.2fs," % t.secs)
+ print(" Requests/s: %.2f OK, %.2f Total" % (ok_rps, total_rps))
+ print(" Flows/s: %.2f OK, %.2f Total" % (ok_fps, total_fps))
+ print(" Stats ({Requests})")
print(rqst_stats)
self.threads_done += 1
for c in range(self.ncycles):
self.stats = self.FcbStats()
with self.print_lock:
- print('\nCycle %d:' % c)
+ print("\nCycle %d:" % c)
threads = []
for i in range(self.nthreads):
thread.join()
with self.print_lock:
- print('\n*** Test summary:')
- print(' Elapsed time: %.2fs' % t.secs)
- print(' Peak requests/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
- print(' Peak flows/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
- print(' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
- self.stats.get_ok_rqsts() / t.secs,
- self.stats.get_total_rqsts() / t.secs,
- (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
- print(' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
- self.stats.get_ok_flows() / t.secs,
- self.stats.get_total_flows() / t.secs,
- (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate()))
+ print("\n*** Test summary:")
+ print(" Elapsed time: %.2fs" % t.secs)
+ print(
+ " Peak requests/s: %.2f OK, %.2f Total"
+ % (self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
+ )
+ print(
+ " Peak flows/s: %.2f OK, %.2f Total"
+ % (self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
+ )
+ print(
+ " Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)"
+ % (
+ self.stats.get_ok_rqsts() / t.secs,
+ self.stats.get_total_rqsts() / t.secs,
+ (self.stats.get_total_rqsts() / t.secs * 100)
+ / self.stats.get_total_rqst_rate(),
+ )
+ )
+ print(
+ " Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)"
+ % (
+ self.stats.get_ok_flows() / t.secs,
+ self.stats.get_total_flows() / t.secs,
+ (self.stats.get_total_flows() / t.secs * 100)
+ / self.stats.get_total_flow_rate(),
+ )
+ )
self.total_ok_flows += self.stats.get_ok_flows()
self.total_ok_rqsts += self.stats.get_ok_rqsts()
return self.total_ok_rqsts
def create_flow_name(self, flow_id):
- return 'TestFlow-%d' % flow_id
+ return "TestFlow-%d" % flow_id
def get_json_from_file(filename):
:param filename: File from which to get the template
:return: The json flow template (string)
"""
- with open(filename, 'r') as f:
+ with open(filename, "r") as f:
try:
ft = json.load(f)
- keys = ft['flow'][0].keys()
- if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
- if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
+ keys = ft["flow"][0].keys()
+ if (
+ (u"cookie" in keys)
+ and (u"flow-name" in keys)
+ and (u"id" in keys)
+ and (u"match" in keys)
+ ):
+ if u"ipv4-destination" in ft[u"flow"][0]["match"].keys():
print('File "%s" ok to use as flow template' % filename)
return ft
except ValueError:
- print('JSON parsing of file %s failed' % filename)
+ print("JSON parsing of file %s failed" % filename)
pass
return None
# also beneficial to have unique "cookie" and "flow-name" attributes for easier
# identification of the flow.
###############################################################################
-example_flow_mod_json = '''{
+example_flow_mod_json = """{
"flow": [
{
"id": "38",
}
]
-}'''
+}"""
def create_arguments_parser():
Shorthand to arg parser on library level in order to access and eventually enhance in ancestors.
:return: argument parser supporting config blaster arguments and parameters
"""
- my_parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then'
- ' deletes flows into the config tree, as specified by'
- ' optional parameters.')
-
- my_parser.add_argument('--host', default='127.0.0.1',
- help='Host where odl controller is running (default is 127.0.0.1). '
- 'Specify a comma-separated list of hosts to perform round-robin load-balancing.')
- my_parser.add_argument('--port', default='8181',
- help='Port on which odl\'s RESTCONF is listening (default is 8181)')
- my_parser.add_argument('--cycles', type=int, default=1,
- help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
- 'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
- 'ends when all threads finish. Another cycle is started when the previous cycle '
- 'finished.')
- my_parser.add_argument('--threads', type=int, default=1,
- help='Number of request worker threads to start in each cycle; default=1. '
- 'Each thread will add/delete <FLOWS> flows.')
- my_parser.add_argument('--flows', type=int, default=10,
- help='Number of flows that will be added/deleted by each worker thread in each cycle; '
- 'default 10')
- my_parser.add_argument('--fpr', type=int, default=1,
- help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
- 'default 1')
- my_parser.add_argument('--nodes', type=int, default=16,
- help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
- 'flows will be evenly distributed (programmed) into connected nodes.')
- my_parser.add_argument('--delay', type=int, default=0,
- help='Time (in seconds) to wait between the add and delete cycles; default=0')
- my_parser.add_argument('--delete', dest='delete', action='store_true', default=True,
- help='Delete all added flows one by one, benchmark delete '
- 'performance.')
- my_parser.add_argument('--no-delete', dest='delete', action='store_false',
- help='Do not perform the delete cycle.')
- my_parser.add_argument('--auth', dest='auth', action='store_true', default=False,
- help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
- 'default: no authentication')
- my_parser.add_argument('--startflow', type=int, default=0,
- help='The starting Flow ID; default=0')
- my_parser.add_argument('--file', default='',
- help='File from which to read the JSON flow template; default: no file, use a built in '
- 'template.')
+ my_parser = argparse.ArgumentParser(
+ description="Flow programming performance test: First adds and then"
+ " deletes flows into the config tree, as specified by"
+ " optional parameters."
+ )
+
+ my_parser.add_argument(
+ "--host",
+ default="127.0.0.1",
+ help="Host where odl controller is running (default is 127.0.0.1). "
+ "Specify a comma-separated list of hosts to perform round-robin load-balancing.",
+ )
+ my_parser.add_argument(
+ "--port",
+ default="8181",
+ help="Port on which odl's RESTCONF is listening (default is 8181)",
+ )
+ my_parser.add_argument(
+ "--cycles",
+ type=int,
+ default=1,
+ help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+ "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+ "ends when all threads finish. Another cycle is started when the previous cycle "
+ "finished.",
+ )
+ my_parser.add_argument(
+ "--threads",
+ type=int,
+ default=1,
+ help="Number of request worker threads to start in each cycle; default=1. "
+ "Each thread will add/delete <FLOWS> flows.",
+ )
+ my_parser.add_argument(
+ "--flows",
+ type=int,
+ default=10,
+ help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+ "default 10",
+ )
+ my_parser.add_argument(
+ "--fpr",
+ type=int,
+ default=1,
+ help="Flows-per-Request - number of flows (batch size) sent in each HTTP request; "
+ "default 1",
+ )
+ my_parser.add_argument(
+ "--nodes",
+ type=int,
+ default=16,
+ help="Number of nodes if mininet is not connected; default=16. If mininet is connected, "
+ "flows will be evenly distributed (programmed) into connected nodes.",
+ )
+ my_parser.add_argument(
+ "--delay",
+ type=int,
+ default=0,
+ help="Time (in seconds) to wait between the add and delete cycles; default=0",
+ )
+ my_parser.add_argument(
+ "--delete",
+ dest="delete",
+ action="store_true",
+ default=True,
+ help="Delete all added flows one by one, benchmark delete " "performance.",
+ )
+ my_parser.add_argument(
+ "--no-delete",
+ dest="delete",
+ action="store_false",
+ help="Do not perform the delete cycle.",
+ )
+ my_parser.add_argument(
+ "--auth",
+ dest="auth",
+ action="store_true",
+ default=False,
+ help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
+ "default: no authentication",
+ )
+ my_parser.add_argument(
+ "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+ )
+ my_parser.add_argument(
+ "--file",
+ default="",
+ help="File from which to read the JSON flow template; default: no file, use a built in "
+ "template.",
+ )
return my_parser
parser = create_arguments_parser()
in_args = parser.parse_args()
- if in_args.file != '':
+ if in_args.file != "":
flow_template = get_json_from_file(in_args.file)
else:
flow_template = None
- fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr, in_args.nodes,
- in_args.flows, in_args.startflow, in_args.auth)
+ fct = FlowConfigBlaster(
+ in_args.host,
+ in_args.port,
+ in_args.cycles,
+ in_args.threads,
+ in_args.fpr,
+ in_args.nodes,
+ in_args.flows,
+ in_args.startflow,
+ in_args.auth,
+ )
# Run through <cycles>, where <threads> are started in each cycle and
# <flows> are added from each thread
fct.add_blaster()
- print('\n*** Total flows added: %s' % fct.get_ok_flows())
- print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
+ print("\n*** Total flows added: %s" % fct.get_ok_flows())
+ print(" HTTP[OK] results: %d\n" % fct.get_ok_rqsts())
if in_args.delay > 0:
- print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
+ print(
+ "*** Waiting for %d seconds before the delete cycle ***\n" % in_args.delay
+ )
time.sleep(in_args.delay)
# Run through <cycles>, where <threads> are started in each cycle and
# <flows> previously added in an add cycle are deleted in each thread
if in_args.delete:
fct.delete_blaster()
- print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
- print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
+ print("\n*** Total flows deleted: %s" % fct.get_ok_flows())
+ print(" HTTP[OK] results: %d\n" % fct.get_ok_rqsts())