def wait_for_stats(crawler, exp_found, timeout, delay):
"""
- Waits for the ODL stats manager to catch up. Polls ODL inventory every <delay> seconds and compares the
- retrieved stats to the expected values. If stats collection has not finished within <timeout> seconds,
- Gives up/
-
+ Waits for the ODL stats manager to catch up. Polls ODL inventory every
+ <delay> seconds and compares the retrieved stats to the expected values. If
+ stats collection has not finished within <timeout> seconds, the test is
+ aborted.
:param crawler: Inventory crawler object
:param exp_found: Expected value for flows found in the network
- :param timeout: Max number of seconds to wait for stats collector to collect all stats
+ :param timeout: Max number of seconds to wait for stats collector to
+ collect all stats
:param delay: poll interval for inventory
:return: None
"""
if __name__ == "__main__":
- ########################################################################################
- # This program executes an ODL performance test. The test is executed in three steps:
- #
- # 1. The specified number of flows is added in the 'add cycle' (uses flow_config_blaster to blast flows)
+ ############################################################################
+ # This program executes an ODL performance test. The test is executed in
+ # three steps:
#
- # 2. The network is polled for flow statistics from the network (using the inventory_crawler) to make sure
- # that all flows have been properly programmed into the network and the ODL statistics collector can
- # properly read them
- #
- # 3. The flows are deleted in the flow cycle. Deletion happens either in 'bulk' (using the config_cleanup)
- # script or one by one (using the flow_config_blaster 'delete' method)
- ########################################################################################
-
- JSON_FLOW_MOD1 = '''{
- "flow-node-inventory:flow": [
- {
- "flow-node-inventory:cookie": %d,
- "flow-node-inventory:cookie_mask": 4294967295,
- "flow-node-inventory:flow-name": "%s",
- "flow-node-inventory:hard-timeout": %d,
- "flow-node-inventory:id": "%s",
- "flow-node-inventory:idle-timeout": %d,
- "flow-node-inventory:installHw": false,
- "flow-node-inventory:instructions": {
- "flow-node-inventory:instruction": [
- {
- "flow-node-inventory:apply-actions": {
- "flow-node-inventory:action": [
- {
- "flow-node-inventory:drop-action": {},
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:match": {
- "flow-node-inventory:ipv4-destination": "%s/32",
- "flow-node-inventory:ethernet-match": {
- "flow-node-inventory:ethernet-type": {
- "flow-node-inventory:type": 2048
- }
- }
- },
- "flow-node-inventory:priority": 2,
- "flow-node-inventory:strict": false,
- "flow-node-inventory:table_id": 0
- }
- ]
- }'''
+ # 1. The specified number of flows is added in the 'add cycle' (uses
+ # flow_config_blaster to blast flows)
+ # 2. The network is polled for flow statistics from the network (using the
+ # inventory_crawler.py script) to make sure that all flows have been
+ # properly programmed into the network and the ODL statistics collector
+ # can properly read them
+ # 3. The flows are deleted in the flow cycle. Deletion happens either in
+ # 'bulk' (using the config_cleanup) script or one by one (using the
+ # flow_config_blaster 'delete' method)
+ ############################################################################
parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
'into the config tree, as specified by optional parameters.')
parser.add_argument('--flows', type=int, default=10,
help='Number of flows that will be added/deleted by each worker thread in each cycle; '
'default 10')
- parser.add_argument('--nodes', type=int, default=16,
- help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
- 'flows will be evenly distributed (programmed) into connected nodes.')
+ parser.add_argument('--fpr', type=int, default=1,
+ help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+ 'default 1')
    parser.add_argument('--delay', type=int, default=2,
                        help='Time (seconds) between inventory polls when waiting for stats to catch up; default=2')
parser.add_argument('--timeout', type=int, default=100,
if in_args.file != '':
flow_template = get_json_from_file(in_args.file)
else:
- flow_template = JSON_FLOW_MOD1
+ flow_template = None
ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
- fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
- in_args.flows, in_args.startflow, in_args.auth, flow_template)
-
- # Get the baseline stats. Required in Step 3 to validate if the delete function gets the controller back to
- # the baseline
+ fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
+ 16, in_args.flows, in_args.startflow, in_args.auth)
+ # Get the baseline stats. Required in Step 3 to validate if the delete
+ # function gets the controller back to the baseline
ic.crawl_inventory()
reported = ic.reported_flows
found = ic.found_flows
print ' Reported nodes: %d' % reported
print ' Found nodes: %d' % found
- # Run through <CYCLES> add cycles, where <THREADS> threads are started in each cycle and <FLOWS> flows are
- # added from each thread
+ # Run through <CYCLES> add cycles, where <THREADS> threads are started in
+ # each cycle and <FLOWS> flows are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_total_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_flows()
+ print '\n*** Total flows added: %d' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
# Wait for stats to catch up
wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
- # Run through <CYCLES> delete cycles, where <THREADS> threads are started in each cycle and <FLOWS> flows
- # previously added in an add cycle are deleted in each thread
+ # Run through <CYCLES> delete cycles, where <THREADS> threads are started
+ # in each cycle and <FLOWS> flows previously added in an add cycle are
+ # deleted in each thread
if in_args.bulk_delete:
print '\nDeleting all flows in bulk:'
sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
else:
print '\nDeleting flows one by one\n ',
fct.delete_blaster()
+ print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
# Wait for stats to catch up back to baseline
wait_for_stats(ic, found, in_args.timeout, in_args.delay)
import time
import threading
import re
+import copy
import requests
import netaddr
getheaders = {'Accept': 'application/json'}
FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+ TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"
INVURL = 'restconf/operational/opendaylight-inventory:nodes'
- ok_total = 0
-
flows = {}
- def __init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow, auth, json_template):
+ # The "built-in" flow template
+ flow_mode_template = {
+ u'flow': [
+ {
+ u'hard-timeout': 65000,
+ u'idle-timeout': 65000,
+ u'cookie_mask': 4294967295,
+ u'flow-name': u'FLOW-NAME-TEMPLATE',
+ u'priority': 2,
+ u'strict': False,
+ u'cookie': 0,
+ u'table_id': 0,
+ u'installHw': False,
+ u'id': u'FLOW-ID-TEMPLATE',
+ u'match': {
+ u'ipv4-destination': u'0.0.0.0/32',
+ u'ethernet-match': {
+ u'ethernet-type': {
+ u'type': 2048
+ }
+ }
+ },
+ u'instructions': {
+ u'instruction': [
+ {
+ u'order': 0,
+ u'apply-actions': {
+ u'action': [
+ {
+ u'drop-action': {},
+ u'order': 0
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ ]
+ }
+
+ class FcbStats(object):
+ """
+ FlowConfigBlaster Statistics: a class that stores and further processes
+ statistics collected by Blaster worker threads during their execution.
+ """
+ def __init__(self):
+ self.ok_rqst_rate = Counter(0.0)
+ self.total_rqst_rate = Counter(0.0)
+ self.ok_flow_rate = Counter(0.0)
+ self.total_flow_rate = Counter(0.0)
+ self.ok_rqsts = Counter(0)
+ self.total_rqsts = Counter(0)
+ self.ok_flows = Counter(0)
+ self.total_flows = Counter(0)
+
+ def process_stats(self, rqst_stats, flow_stats, elapsed_time):
+ """
+ Calculates the stats for RESTCONF request and flow programming
+ throughput, and aggregates statistics across all Blaster threads.
+ """
+ ok_rqsts = rqst_stats[200] + rqst_stats[204]
+ total_rqsts = sum(rqst_stats.values())
+ ok_flows = flow_stats[200] + flow_stats[204]
+ total_flows = sum(flow_stats.values())
+
+ ok_rqst_rate = ok_rqsts / elapsed_time
+ total_rqst_rate = total_rqsts / elapsed_time
+ ok_flow_rate = ok_flows / elapsed_time
+ total_flow_rate = total_flows / elapsed_time
+
+ self.ok_rqsts.increment(ok_rqsts)
+ self.total_rqsts.increment(total_rqsts)
+ self.ok_flows.increment(ok_flows)
+ self.total_flows.increment(total_flows)
+
+ self.ok_rqst_rate.increment(ok_rqst_rate)
+ self.total_rqst_rate.increment(total_rqst_rate)
+ self.ok_flow_rate.increment(ok_flow_rate)
+ self.total_flow_rate.increment(total_flow_rate)
+
+ return ok_rqst_rate, total_rqst_rate, ok_flow_rate, total_flow_rate
+
+ def get_ok_rqst_rate(self):
+ return self.ok_rqst_rate.value
+
+ def get_total_rqst_rate(self):
+ return self.total_rqst_rate.value
+
+ def get_ok_flow_rate(self):
+ return self.ok_flow_rate.value
+
+ def get_total_flow_rate(self):
+ return self.total_flow_rate.value
+
+ def get_ok_rqsts(self):
+ return self.ok_rqsts.value
+
+ def get_total_rqsts(self):
+ return self.total_rqsts.value
+
+ def get_ok_flows(self):
+ return self.ok_flows.value
+
+ def get_total_flows(self):
+ return self.total_flows.value
+
+ def __init__(self, host, port, ncycles, nthreads, fpr, nnodes, nflows, startflow, auth, flow_mod_template=None):
self.host = host
self.port = port
self.ncycles = ncycles
self.nthreads = nthreads
+ self.fpr = fpr
self.nnodes = nnodes
self.nflows = nflows
self.startflow = startflow
self.auth = auth
- self.json_template = json_template
- self.url_template = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
+ if flow_mod_template:
+ self.flow_mode_template = flow_mod_template
+
+ self.post_url_template = 'http://' + self.host + ":" + self.port + '/' + self.TBLURL
+ self.del_url_template = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
- self.ok_rate = Counter(0.0)
- self.total_rate = Counter(0.0)
+ self.stats = self.FcbStats()
+ self.total_ok_flows = 0
+ self.total_ok_rqsts = 0
self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
def get_num_nodes(self, session):
"""
- Determines the number of OF nodes in the connected mininet network. If mininet is not connected, the default
- number of flows is 16
+ Determines the number of OF nodes in the connected mininet network. If
+        mininet is not connected, the default number of nodes is set to 16.
+        :param session: 'requests' session to use to query the controller
+ for openflow nodes
+        :return: The number of nodes into which flows will be programmed
"""
inventory_url = 'http://' + self.host + ":" + self.port + '/' + self.INVURL
nodes = self.nnodes
return nodes
- def add_flow(self, session, node, flow_id, ipaddr):
+ def create_flow_from_template(self, flow_id, ipaddr):
+ """
+ Create a new flow instance from the flow template specified during
+ FlowConfigBlaster instantiation. Flow templates are json-compatible
+ dictionaries that MUST contain elements for flow cookie, flow name,
+ flow id and the destination IPv4 address in the flow match field.
+ :param flow_id: Id for the new flow to create
+ :param ipaddr: IP Address to put into the flow's match
+ :return: The newly created flow instance
"""
- Adds a single flow to the config data store via REST
+ flow = copy.deepcopy(self.flow_mode_template['flow'][0])
+ flow['cookie'] = flow_id
+ flow['flow-name'] = 'TestFlow-%d' % flow_id
+ flow['id'] = str(flow_id)
+ flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
+ return flow
+
+ def post_flows(self, session, node, flow_list):
"""
- flow_data = self.json_template % (flow_id, 'TestFlow-%d' % flow_id, 65000, str(flow_id), 65000,
- str(netaddr.IPAddress(ipaddr)))
+        Performs a RESTCONF post of flows passed in the 'flow_list' parameter
+ :param session: 'requests' session on which to perform the POST
+ :param node: The ID of the openflow node to which to post the flows
+ :param flow_list: List of flows (in dictionary form) to POST
+ :return: status code from the POST operation
+ """
+ fmod = dict(self.flow_mode_template)
+ fmod['flow'] = flow_list
+ flow_data = json.dumps(fmod)
# print flow_data
- flow_url = self.url_template % (node, flow_id)
+ flow_url = self.post_url_template % node
# print flow_url
if not self.auth:
- r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False)
+ r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False)
else:
- r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'))
+ r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'))
return r.status_code
- def add_flows(self, start_flow, tid):
+ def add_flows(self, start_flow_id, tid):
"""
- Adds flows into the ODL config space. This function is executed by a worker thread
+ Adds flows into the ODL config data store. This function is executed by
+ a worker thread (the Blaster thread). The number of flows created and
+ the batch size (i.e. how many flows will be put into a RESTCONF request)
+ are determined by control parameters initialized when FlowConfigBlaster
+ is created.
+        :param start_flow_id: the ID of the first flow. Each Blaster thread
+ programs a different set of flows
+ :param tid: Thread ID - used to id the Blaster thread when statistics
+ for the thread are printed out
+ :return: None
"""
-
- add_res = {200: 0}
+ rqst_stats = {200: 0, 204: 0}
+ flow_stats = {200: 0, 204: 0}
s = requests.Session()
with self.print_lock:
print ' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ nflows = 0
with Timer() as t:
- for flow in range(self.nflows):
+ while nflows < self.nflows:
node_id = randrange(1, n_nodes + 1)
- flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
- self.flows[tid][flow_id] = node_id
- sts = self.add_flow(s, node_id, flow_id, self.ip_addr.increment())
+ flow_list = []
+ for i in range(self.fpr):
+ flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
+ self.flows[tid][flow_id] = node_id
+ flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment()))
+ nflows += 1
+ if nflows >= self.nflows:
+ break
+ sts = self.post_flows(s, node_id, flow_list)
try:
- add_res[sts] += 1
+ rqst_stats[sts] += 1
+ flow_stats[sts] += len(flow_list)
except KeyError:
- add_res[sts] = 1
-
- add_time = t.secs
- add_ok_rate = add_res[200] / add_time
- add_total_rate = sum(add_res.values()) / add_time
+ rqst_stats[sts] = 1
+ flow_stats[sts] = len(flow_list)
- self.ok_rate.increment(add_ok_rate)
- self.total_rate.increment(add_total_rate)
+ ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
with self.print_lock:
- print ' Thread %d: ' % tid
- print ' Add time: %.2f,' % add_time
- print ' Add success rate: %.2f, Add total rate: %.2f' % (add_ok_rate, add_total_rate)
- print ' Add Results: ',
- print add_res
- self.ok_total += add_res[200]
+ print '\n Thread %d results (ADD): ' % tid
+ print ' Elapsed time: %.2fs,' % t.secs
+ print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
+ print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
+ print ' Stats ({Requests}, {Flows}): ',
+ print rqst_stats,
+ print flow_stats
self.threads_done += 1
s.close()
def delete_flow(self, session, node, flow_id):
"""
- Deletes a single flow from the ODL config data store via REST
+ Deletes a single flow from the ODL config data store using RESTCONF
+        :param session: 'requests' session on which to perform the DELETE
+ :param node: Id of the openflow node from which to delete the flow
+ :param flow_id: ID of the to-be-deleted flow
+ :return: status code from the DELETE operation
"""
- flow_url = self.url_template % (node, flow_id)
+ flow_url = self.del_url_template % (node, flow_id)
if not self.auth:
r = session.delete(flow_url, headers=self.getheaders)
def delete_flows(self, start_flow, tid):
"""
- Deletes flow from the ODL config space that have been added using the 'add_flows()' function. This function is
- executed by a worker thread
+ Deletes flow from the ODL config space that have been added using the
+ 'add_flows()' function. This function is executed by a worker thread
+        :param start_flow: the ID of the first flow. Each Blaster thread
+ deletes a different set of flows
+ :param tid: Thread ID - used to id the Blaster thread when statistics
+ for the thread are printed out
+ :return:
"""
- del_res = {200: 0}
+ """
+ """
+ rqst_stats = {200: 0, 204: 0}
s = requests.Session()
n_nodes = self.get_num_nodes(s)
flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
sts = self.delete_flow(s, self.flows[tid][flow_id], flow_id)
try:
- del_res[sts] += 1
+ rqst_stats[sts] += 1
except KeyError:
- del_res[sts] = 1
-
- del_time = t.secs
+ rqst_stats[sts] = 1
- del_ok_rate = del_res[200] / del_time
- del_total_rate = sum(del_res.values()) / del_time
-
- self.ok_rate.increment(del_ok_rate)
- self.total_rate.increment(del_total_rate)
+ ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
with self.print_lock:
- print ' Thread %d: ' % tid
- print ' Delete time: %.2f,' % del_time
- print ' Delete success rate: %.2f, Delete total rate: %.2f' % (del_ok_rate, del_total_rate)
- print ' Delete Results: ',
- print del_res
+ print '\n Thread %d results (DELETE): ' % tid
+ print ' Elapsed time: %.2fs,' % t.secs
+ print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
+ print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
+ print ' Stats ({Requests})',
+ print rqst_stats
self.threads_done += 1
s.close()
def run_cycle(self, function):
"""
- Runs an add or delete cycle. Starts a number of worker threads that each add a bunch of flows. Work is done
- in context of the worker threads.
+        Runs a flow-add or flow-delete test cycle. Each test consists of
+ <cycles> test cycles, where <threads> worker (Blaster) threads are
+ started in each test cycle. Each Blaster thread programs <flows>
+ OpenFlow flows into the controller using the controller's RESTCONF API.
+ :param function: Add or delete, determines what test will be executed.
+ :return: None
"""
+ self.total_ok_flows = 0
+ self.total_ok_rqsts = 0
for c in range(self.ncycles):
+ self.stats = self.FcbStats()
with self.print_lock:
print '\nCycle %d:' % c
self.cond.wait()
with self.print_lock:
- print ' Total success rate: %.2f, Total rate: %.2f' % (
- self.ok_rate.value, self.total_rate.value)
- measured_rate = (self.nthreads * self.nflows) / t.secs
- print ' Measured rate: %.2f (%.2f%% of Total success rate)' % \
- (measured_rate, measured_rate / self.total_rate.value * 100)
- print ' Measured time: %.2fs' % t.secs
+ print '\n*** Test summary:'
+ print ' Elapsed time: %.2fs' % t.secs
+ print ' Peak requests/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
+ print ' Peak flows/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
+ print ' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ self.stats.get_ok_rqsts() / t.secs,
+ self.stats.get_total_rqsts() / t.secs,
+ (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
+ print ' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ self.stats.get_ok_flows() / t.secs,
+ self.stats.get_total_flows() / t.secs,
+ (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+
+ self.total_ok_flows += self.stats.get_ok_flows()
+ self.total_ok_rqsts += self.stats.get_ok_rqsts()
self.threads_done = 0
- self.ok_rate.value = 0
- self.total_rate.value = 0
-
def add_blaster(self):
self.run_cycle(self.add_flows)
def delete_blaster(self):
self.run_cycle(self.delete_flows)
- def get_total_flows(self):
- return sum(len(self.flows[key]) for key in self.flows.keys())
-
def get_ok_flows(self):
- return self.ok_total
+ return self.total_ok_flows
+
+ def get_ok_rqsts(self):
+ return self.total_ok_rqsts
def get_json_from_file(filename):
    :return: The json flow template (dictionary), or None if the file does not contain a valid template
"""
with open(filename, 'r') as f:
- read_data = f.read()
- return read_data
-
-
-if __name__ == "__main__":
- JSON_FLOW_MOD1 = '''{
- "flow-node-inventory:flow": [
- {
- "flow-node-inventory:cookie": %d,
- "flow-node-inventory:cookie_mask": 4294967295,
- "flow-node-inventory:flow-name": "%s",
- "flow-node-inventory:hard-timeout": %d,
- "flow-node-inventory:id": "%s",
- "flow-node-inventory:idle-timeout": %d,
- "flow-node-inventory:installHw": false,
- "flow-node-inventory:instructions": {
- "flow-node-inventory:instruction": [
- {
- "flow-node-inventory:apply-actions": {
- "flow-node-inventory:action": [
- {
- "flow-node-inventory:drop-action": {},
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:match": {
- "flow-node-inventory:ipv4-destination": "%s/32",
- "flow-node-inventory:ethernet-match": {
- "flow-node-inventory:ethernet-type": {
- "flow-node-inventory:type": 2048
+ try:
+ ft = json.load(f)
+ keys = ft['flow'][0].keys()
+ if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
+ if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
+ print 'File "%s" ok to use as flow template' % filename
+ return ft
+ except ValueError:
+ print 'JSON parsing of file %s failed' % filename
+ pass
+
+ return None
+
+###############################################################################
+# This is an example of what the content of a JSON flow mod template should
+# look like. Cut & paste to create a custom template. "id" and "ipv4-destination"
+# MUST be unique if multiple flows will be programmed in the same test. It's
+# also beneficial to have unique "cookie" and "flow-name" attributes for easier
+# identification of the flow.
+###############################################################################
+example_flow_mod_json = '''{
+ "flow": [
+ {
+ "id": "38",
+ "cookie": 38,
+ "instructions": {
+ "instruction": [
+ {
+ "order": 0,
+ "apply-actions": {
+ "action": [
+ {
+ "order": 0,
+ "drop-action": { }
+ }
+ ]
}
}
+ ]
+ },
+ "hard-timeout": 65000,
+ "match": {
+ "ethernet-match": {
+ "ethernet-type": {
+ "type": 2048
+ }
},
- "flow-node-inventory:priority": 2,
- "flow-node-inventory:strict": false,
- "flow-node-inventory:table_id": 0
- }
- ]
- }'''
+ "ipv4-destination": "10.0.0.38/32"
+ },
+ "flow-name": "TestFlow-8",
+ "strict": false,
+ "cookie_mask": 4294967295,
+ "priority": 2,
+ "table_id": 0,
+ "idle-timeout": 65000,
+ "installHw": false
+ }
+
+ ]
+}'''
+if __name__ == "__main__":
+ ############################################################################
+ # This program executes the base performance test. The test adds flows into
+ # the controller's config space. This function is basically the CLI frontend
+ # to the FlowConfigBlaster class and drives its main functions: adding and
+ # deleting flows from the controller's config data store
+ ############################################################################
parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
'into the config tree, as specified by optional parameters.')
parser.add_argument('--flows', type=int, default=10,
help='Number of flows that will be added/deleted by each worker thread in each cycle; '
'default 10')
+ parser.add_argument('--fpr', type=int, default=1,
+ help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+ 'default 1')
parser.add_argument('--nodes', type=int, default=16,
help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
'flows will be evenly distributed (programmed) into connected nodes.')
if in_args.file != '':
flow_template = get_json_from_file(in_args.file)
else:
- flow_template = JSON_FLOW_MOD1
+ flow_template = None
- fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
- in_args.flows, in_args.startflow, in_args.auth, flow_template)
+ fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr, in_args.nodes,
+ in_args.flows, in_args.startflow, in_args.auth)
- # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+ # Run through <cycles>, where <threads> are started in each cycle and
+ # <flows> are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_total_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_flows()
+ print '\n*** Total flows added: %s' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
if in_args.delay > 0:
print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
time.sleep(in_args.delay)
- # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
- # deleted in each thread
+ # Run through <cycles>, where <threads> are started in each cycle and
+ # <flows> previously added in an add cycle are deleted in each thread
if in_args.delete:
fct.delete_blaster()
+ print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()