# NOTE(review): unified-diff hunk fragment (tail of a Timer context manager's
# __exit__). The '-'/'+' prefixes below are patch markers, not Python: the
# hunk converts a py2 print statement to the py3 print() function.
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # elapsed time in milliseconds
if self.verbose:
- print ("elapsed time: %f ms" % self.msecs)
+ print("elapsed time: %f ms" % self.msecs)
# NOTE(review): diff context fragment. The class docstring's opening '"""' and
# intervening lines were elided by the hunk, so only its tail is visible here.
# The Counter(0.0) initializers presumably come from an inner stats helper
# class ("FlowConfigBlaster Statistics") — confirm against the full file.
class FlowConfigBlaster(object):
FlowConfigBlaster Statistics: a class that stores and further processes
statistics collected by Blaster worker threads during their execution.
"""
+
def __init__(self):
self.ok_rqst_rate = Counter(0.0)
self.total_rqst_rate = Counter(0.0)
"""
Calculates the stats for RESTCONF request and flow programming
throughput, and aggregates statistics across all Blaster threads.
+
+ Args:
+ rqst_stats: Request statistics dictionary
+ flow_stats: Flow statistcis dictionary
+ elapsed_time: Elapsed time for the test
+
+ Returns: Rates (requests/sec) for successfully finished requests,
+ the total number of requests, sucessfully installed flow and
+ the total number of flows
"""
ok_rqsts = rqst_stats[200] + rqst_stats[204]
total_rqsts = sum(rqst_stats.values())
# NOTE(review): fragment of create_flow_from_template (its 'def' line was
# elided). The hunk replaces the inline 'TestFlow-%d' name with a call to the
# new create_flow_name() hook, and rewrites the docstring in Google style.
# Docstring typo fixed ("gas been" -> "has been"); code bytes unchanged.
FlowConfigBlaster instantiation. Flow templates are json-compatible
dictionaries that MUST contain elements for flow cookie, flow name,
flow id and the destination IPv4 address in the flow match field.
- :param flow_id: Id for the new flow to create
- :param ipaddr: IP Address to put into the flow's match
- :return: The newly created flow instance
+
+ Args:
+ flow_id: Id for the new flow to create
+ ipaddr: IP Address to put into the flow's match
+ node_id: ID of the node where to create the flow
+
+ Returns: The flow that has been created from the template
+
"""
flow = copy.deepcopy(self.flow_mode_template['flow'][0])
flow['cookie'] = flow_id
- flow['flow-name'] = 'TestFlow-%d' % flow_id
+ flow['flow-name'] = self.create_flow_name(flow_id)
flow['id'] = str(flow_id)
flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
return flow
# NOTE(review): two fragments fused by the diff — the tail of a POST method
# (docstring + host round-robin + session.post) and the tail of a JSON
# conversion helper (fmod/json.dumps/return flow_data). Context lines between
# them were elided; do not read this as one contiguous function body.
# The '+' docstring line documents the new flow_count round-robin parameter;
# the '-' lines remove commented-out debug prints.
:param session: 'requests' session on which to perform the POST
:param node: The ID of the openflow node to which to post the flows
:param flow_list: List of flows (in dictionary form) to POST
+ :param flow_count: Flow counter for round-robin host load balancing
+
:return: status code from the POST operation
"""
flow_data = self.convert_to_json(flow_list, node)
hosts = self.host.split(",")
host = hosts[flow_count % len(hosts)]
flow_url = self.assemble_post_url(host, node)
- # print flow_url
if not self.auth:
r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
fmod = dict(self.flow_mode_template)
fmod['flow'] = flow_list
flow_data = json.dumps(fmod)
- # print flow_data
return flow_data
# NOTE(review): add_flows hunk. Besides the py2->py3 print conversion, this
# hunk changes TIMING SCOPE: flow-list construction (the while loop) moves out
# of the 'with Timer()' block into a pre-built nb_actions list, so t.secs now
# measures only the POSTs, not template expansion — an intentional semantic
# change worth calling out in the commit message.
# NOTE(review): py2 'print x,' suppressed the trailing newline; the converted
# 'print(rqst_stats,)' does NOT (the trailing comma is a no-op in a call), so
# the ADD summary is now printed on separate lines — confirm this is intended.
# 's', 'rqst_stats' and 'flow_stats' are bound in context lines elided by the
# diff (presumably a requests session and per-thread stats dicts — verify).
def add_flows(self, start_flow_id, tid):
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print ' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print(' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))
nflows = 0
+ nb_actions = []
+ while nflows < self.nflows:
+ node_id = randrange(1, n_nodes + 1)
+ flow_list = []
+ for i in range(self.fpr):
+ flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
+ self.flows[tid][flow_id] = node_id
+ flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment(), node_id))
+ nflows += 1
+ if nflows >= self.nflows:
+ break
+ nb_actions.append((s, node_id, flow_list, nflows))
+
with Timer() as t:
- while nflows < self.nflows:
- node_id = randrange(1, n_nodes + 1)
- flow_list = []
- for i in range(self.fpr):
- flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
- self.flows[tid][flow_id] = node_id
- flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment(), node_id))
- nflows += 1
- if nflows >= self.nflows:
- break
- sts = self.post_flows(s, node_id, flow_list, nflows)
+ for nb_action in nb_actions:
+ sts = self.post_flows(*nb_action)
try:
rqst_stats[sts] += 1
- flow_stats[sts] += len(flow_list)
+ flow_stats[sts] += len(nb_action[2])
except KeyError:
rqst_stats[sts] = 1
- flow_stats[sts] = len(flow_list)
+ flow_stats[sts] = len(nb_action[2])
ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
with self.print_lock:
- print '\n Thread %d results (ADD): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests}, {Flows}): ',
- print rqst_stats,
- print flow_stats
+ print('\n Thread %d results (ADD): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests}, {Flows}): ')
+ print(rqst_stats,)
+ print(flow_stats)
self.threads_done += 1
s.close()
# NOTE(review): delete_flow hunk — docstring rewritten in Google style (the
# new flow_count parameter selects a host round-robin, mirroring post_flows)
# and a commented-out debug print removed. The visible body ends mid-method
# ('r' is assigned but the status-code return is in elided context lines).
def delete_flow(self, session, node, flow_id, flow_count):
"""
Deletes a single flow from the ODL config data store using RESTCONF
- :param session: 'requests' session on which to perform the POST
- :param node: Id of the openflow node from which to delete the flow
- :param flow_id: ID of the to-be-deleted flow
- :return: status code from the DELETE operation
+ Args:
+ session: 'requests' session on which to perform the POST
+ node: Id of the openflow node from which to delete the flow
+ flow_id: ID of the to-be-deleted flow
+ flow_count: Index of the flow being processed (for round-robin LB)
+
+ Returns: status code from the DELETE operation
+
"""
hosts = self.host.split(",")
host = hosts[flow_count % len(hosts)]
flow_url = self.del_url_template % (host, node, flow_id)
- # print flow_url
if not self.auth:
r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
# NOTE(review): delete-worker hunk (its 'def' line and loop body were elided).
# Pure py2->py3 print conversion; same trailing-comma caveat as in add_flows:
# py2 "print ' Stats ({Requests})'," suppressed the newline, the converted
# print(...,) does not.
# NOTE(review): process_stats is called with rqst_stats passed TWICE (as both
# request and flow stats) — looks deliberate for DELETE (1 request == 1 flow),
# but worth confirming it is not a copy-paste slip.
n_nodes = self.get_num_nodes(s)
with self.print_lock:
- print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))
with Timer() as t:
for flow in range(self.nflows):
ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
with self.print_lock:
- print '\n Thread %d results (DELETE): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests})',
- print rqst_stats
+ print('\n Thread %d results (DELETE): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests})',)
+ print(rqst_stats)
self.threads_done += 1
s.close()
# NOTE(review): blaster driver hunk (enclosing method's 'def' and the
# thread-start code were elided): per-cycle stats reset, thread fan-out/join,
# and the end-of-run summary. Only change is the py2->py3 print conversion;
# note the closing ')' of each multi-line print moves to the last argument
# line, which is why arithmetic lines appear on both sides of the hunk.
for c in range(self.ncycles):
self.stats = self.FcbStats()
with self.print_lock:
- print '\nCycle %d:' % c
+ print('\nCycle %d:' % c)
threads = []
for i in range(self.nthreads):
thread.join()
with self.print_lock:
- print '\n*** Test summary:'
- print ' Elapsed time: %.2fs' % t.secs
- print ' Peak requests/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
- print ' Peak flows/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
- print ' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ print('\n*** Test summary:')
+ print(' Elapsed time: %.2fs' % t.secs)
+ print(' Peak requests/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
+ print(' Peak flows/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
+ print(' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
self.stats.get_ok_rqsts() / t.secs,
self.stats.get_total_rqsts() / t.secs,
- (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
- print ' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
- self.stats.get_ok_flows() / t.secs,
- self.stats.get_total_flows() / t.secs,
- (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+ (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
+ print(' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ self.stats.get_ok_flows() / t.secs,
+ self.stats.get_total_flows() / t.secs,
+ (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate()))
self.total_ok_flows += self.stats.get_ok_flows()
self.total_ok_rqsts += self.stats.get_ok_rqsts()
# get_ok_rqsts: accessor returning the accumulated OK-request counter
# (self.total_ok_rqsts, incremented at the end of each cycle above).
def get_ok_rqsts(self):
return self.total_ok_rqsts
# create_flow_name (added by this hunk): single point of truth for the
# per-flow name, used by create_flow_from_template instead of an inline
# 'TestFlow-%d' literal — keeps naming overridable by subclasses.
+ def create_flow_name(self, flow_id):
+ return 'TestFlow-%d' % flow_id
+
# NOTE(review): get_json_from_file hunk — loads a flow template from a JSON
# file and validates that it carries the mandatory keys (cookie, flow-name,
# id, match, match.ipv4-destination). The file-read/json.loads/'try' opener
# is in elided context lines (only 'except ValueError' is visible); returns
# the parsed template on success, None otherwise.
def get_json_from_file(filename):
"""
keys = ft['flow'][0].keys()
if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
- print 'File "%s" ok to use as flow template' % filename
+ print('File "%s" ok to use as flow template' % filename)
return ft
except ValueError:
- print 'JSON parsing of file %s failed' % filename
+ print('JSON parsing of file %s failed' % filename)
pass
return None
+
###############################################################################
# This is an example of what the content of a JSON flow mode template should
# look like. Cut & paste to create a custom template. "id" and "ipv4-destination"
# NOTE(review): __main__ hunk fragment — runs the add cycle, optionally waits
# in_args.delay seconds, then optionally runs the delete cycle. 'fct' and
# 'in_args' are bound in elided context (blaster instance / argparse result —
# confirm against the full file). Only change here is print() conversion.
# <flows> are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows added: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())
if in_args.delay > 0:
- print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+ print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
time.sleep(in_args.delay)
# Run through <cycles>, where <threads> are started in each cycle and
# <flows> previously added in an add cycle are deleted in each thread
if in_args.delete:
fct.delete_blaster()
- print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())