def wait_for_stats(crawler, exp_found, timeout, delay):
"""
- Waits for the ODL stats manager to catch up. Polls ODL inventory every <delay> seconds and compares the
- retrieved stats to the expected values. If stats collection has not finished within <timeout> seconds,
- Gives up/
-
+ Waits for the ODL stats manager to catch up. Polls ODL inventory every
+ <delay> seconds and compares the retrieved stats to the expected values. If
+ stats collection has not finished within <timeout> seconds, the test is
+ aborted.
:param crawler: Inventory crawler object
:param exp_found: Expected value for flows found in the network
- :param timeout: Max number of seconds to wait for stats collector to collect all stats
+ :param timeout: Max number of seconds to wait for stats collector to
+ collect all stats
:param delay: poll interval for inventory
:return: None
"""
if __name__ == "__main__":
- ########################################################################################
- #This program executes an ODL performance test. The test is executed in three steps:
- #
- # 1. The specified number of flows is added in the 'add cycle' (uses flow_config_blaster to blast flows)
+ ############################################################################
+ # This program executes an ODL performance test. The test is executed in
+ # three steps:
#
- # 2. The network is polled for flow statistics from the network (using the inventory_crawler) to make sure
- # that all flows have been properly programmed into the network and the ODL statistics collector can
- # properly read them
- #
- # 3. The flows are deleted in the flow cycle. Deletion happens either in 'bulk' (using the config_cleanup)
- # script or one by one (using the flow_config_blaster 'delete' method)
- ########################################################################################
-
- JSON_FLOW_MOD1 = '''{
- "flow-node-inventory:flow": [
- {
- "flow-node-inventory:cookie": %d,
- "flow-node-inventory:cookie_mask": 4294967295,
- "flow-node-inventory:flow-name": "%s",
- "flow-node-inventory:hard-timeout": %d,
- "flow-node-inventory:id": "%s",
- "flow-node-inventory:idle-timeout": %d,
- "flow-node-inventory:installHw": false,
- "flow-node-inventory:instructions": {
- "flow-node-inventory:instruction": [
- {
- "flow-node-inventory:apply-actions": {
- "flow-node-inventory:action": [
- {
- "flow-node-inventory:drop-action": {},
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:order": 0
- }
- ]
- },
- "flow-node-inventory:match": {
- "flow-node-inventory:ipv4-destination": "%s/32",
- "flow-node-inventory:ethernet-match": {
- "flow-node-inventory:ethernet-type": {
- "flow-node-inventory:type": 2048
- }
- }
- },
- "flow-node-inventory:priority": 2,
- "flow-node-inventory:strict": false,
- "flow-node-inventory:table_id": 0
- }
- ]
- }'''
+ # 1. The specified number of flows is added in the 'add cycle' (uses
+ # flow_config_blaster to blast flows)
+ # 2. The network is polled for flow statistics from the network (using the
+ # inventory_crawler.py script) to make sure that all flows have been
+ # properly programmed into the network and the ODL statistics collector
+ # can properly read them
+ # 3. The flows are deleted in the flow cycle. Deletion happens either in
+ #    'bulk' (using the config_cleanup script) or one by one (using the
+ #    flow_config_blaster 'delete' method)
+ ############################################################################
parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
'into the config tree, as specified by optional parameters.')
parser.add_argument('--flows', type=int, default=10,
help='Number of flows that will be added/deleted by each worker thread in each cycle; '
'default 10')
- parser.add_argument('--nodes', type=int, default=16,
- help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
- 'flows will be evenly distributed (programmed) into connected nodes.')
+ parser.add_argument('--fpr', type=int, default=1,
+ help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+ 'default 1')
parser.add_argument('--delay', type=int, default=2,
help='Time (seconds) to between inventory polls when waiting for stats to catch up; default=1')
parser.add_argument('--timeout', type=int, default=100,
if in_args.file != '':
flow_template = get_json_from_file(in_args.file)
else:
- flow_template = JSON_FLOW_MOD1
+ flow_template = None
ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
- fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
- in_args.flows, in_args.startflow, in_args.auth, flow_template)
-
- # Get the baseline stats. Required in Step 3 to validate if the delete function gets the controller back to
- # the baseline
+ fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
+ 16, in_args.flows, in_args.startflow, in_args.auth)
+ # Get the baseline stats. Required in Step 3 to validate if the delete
+ # function gets the controller back to the baseline
ic.crawl_inventory()
reported = ic.reported_flows
found = ic.found_flows
print 'Baseline:'
- print ' Reported nodes: %d' % reported
- print ' Found nodes: %d' % found
+ print ' Reported flows: %d' % reported
+ print ' Found flows: %d' % found
- # Run through <CYCLES> add cycles, where <THREADS> threads are started in each cycle and <FLOWS> flows are
- # added from each thread
+ # Run through <CYCLES> add cycles, where <THREADS> threads are started in
+ # each cycle and <FLOWS> flows are added from each thread
fct.add_blaster()
- print '\n*** Total flows added: %s' % fct.get_total_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_flows()
+ print '\n*** Total flows added: %d' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
# Wait for stats to catch up
wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
- # Run through <CYCLES> delete cycles, where <THREADS> threads are started in each cycle and <FLOWS> flows
- # previously added in an add cycle are deleted in each thread
+ # Run through <CYCLES> delete cycles, where <THREADS> threads are started
+ # in each cycle and <FLOWS> flows previously added in an add cycle are
+ # deleted in each thread
if in_args.bulk_delete:
print '\nDeleting all flows in bulk:'
sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
else:
print '\nDeleting flows one by one\n ',
fct.delete_blaster()
+ print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
+ print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
# Wait for stats to catch up back to baseline
wait_for_stats(ic, found, in_args.timeout, in_args.delay)