X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=test%2Ftools%2Fodl-mdsal-clustering-tests%2Fclustering-performance-test%2Fflow_add_delete_test.py;h=8f7215cbad5aa6f0a2ba0ae08fae992f3ae22cc8;hb=072f6e3a8d1bdf8f4c663843589c22d93ba07791;hp=e191a15b62b922c98e5934af4ac0755f67fbe8d3;hpb=603c0e9a39a2c75dec35691e0c216ef6b2f82c37;p=integration%2Ftest.git

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
index e191a15b62..8f7215cbad 100755
--- a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
@@ -9,50 +9,53 @@ import argparse
 import time
 from flow_config_blaster import FlowConfigBlaster, get_json_from_file
 from inventory_crawler import InventoryCrawler
-from config_cleanup import cleanup_config
+from config_cleanup import cleanup_config_odl
+
+
+def wait_for_stats(crawler, exp_found, timeout, delay):
+    """
+    Waits for the ODL stats manager to catch up. Polls the ODL inventory
+    every <delay> seconds and compares the retrieved stats to the expected
+    values. If stats collection has not finished within <timeout> seconds,
+    the test is aborted.
+    :param crawler: Inventory crawler object
+    :param exp_found: Expected value for flows found in the network
+    :param timeout: Max number of seconds to wait for the stats collector to
+                    collect all stats
+    :param delay: Poll interval (seconds) for the inventory
+    :return: None
+    """
+    total_delay = 0
+    print 'Waiting for stats to catch up:'
+    while True:
+        crawler.crawl_inventory()
+        print '   %d, %d' % (crawler.reported_flows, crawler.found_flows)
+        if crawler.found_flows == exp_found or total_delay > timeout:
+            break
+        total_delay += delay
+        time.sleep(delay)
+
+    if total_delay < timeout:
+        print 'Stats collected in %d seconds.' % total_delay
+    else:
+        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
 
 
 if __name__ == "__main__":
-
-    JSON_FLOW_MOD1 = '''{
-        "flow-node-inventory:flow": [
-            {
-                "flow-node-inventory:cookie": %d,
-                "flow-node-inventory:cookie_mask": 65535,
-                "flow-node-inventory:flow-name": "%s",
-                "flow-node-inventory:hard-timeout": %d,
-                "flow-node-inventory:id": "%s",
-                "flow-node-inventory:idle-timeout": %d,
-                "flow-node-inventory:installHw": false,
-                "flow-node-inventory:instructions": {
-                    "flow-node-inventory:instruction": [
-                        {
-                            "flow-node-inventory:apply-actions": {
-                                "flow-node-inventory:action": [
-                                    {
-                                        "flow-node-inventory:drop-action": {},
-                                        "flow-node-inventory:order": 0
-                                    }
-                                ]
-                            },
-                            "flow-node-inventory:order": 0
-                        }
-                    ]
-                },
-                "flow-node-inventory:match": {
-                    "flow-node-inventory:ipv4-destination": "%s/32",
-                    "flow-node-inventory:ethernet-match": {
-                        "flow-node-inventory:ethernet-type": {
-                            "flow-node-inventory:type": 2048
-                        }
-                    }
-                },
-                "flow-node-inventory:priority": 2,
-                "flow-node-inventory:strict": false,
-                "flow-node-inventory:table_id": 0
-            }
-        ]
-    }'''
+    ############################################################################
+    # This program executes an ODL performance test. The test is executed in
+    # three steps:
+    #
+    # 1. The specified number of flows is added in the 'add cycle' (uses
+    #    flow_config_blaster to blast flows)
+    # 2. The network is polled for flow statistics (using the
+    #    inventory_crawler.py script) to make sure that all flows have been
+    #    properly programmed into the network and that the ODL statistics
+    #    collector can properly read them
+    # 3. The flows are deleted in the 'delete cycle'. Deletion happens either
+    #    in bulk (using the config_cleanup script) or one by one (using the
+    #    flow_config_blaster 'delete' method)
+    ############################################################################
 
     parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
                                                  'into the config tree, as specified by optional parameters.')
@@ -71,9 +74,9 @@ if __name__ == "__main__":
     parser.add_argument('--flows', type=int, default=10,
                         help='Number of flows that will be added/deleted by each worker thread in each cycle; '
                              'default 10')
-    parser.add_argument('--nodes', type=int, default=16,
-                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
-                             'flows will be evenly distributed (programmed) into connected nodes.')
+    parser.add_argument('--fpr', type=int, default=1,
+                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+                             'default 1')
     parser.add_argument('--delay', type=int, default=2,
                         help='Time (seconds) to wait between inventory polls when waiting for stats to catch up; default=2')
     parser.add_argument('--timeout', type=int, default=100,
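A note on the new --fpr option: it sets how many flows FlowConfigBlaster packs into a single HTTP request. Judging from the removed JSON_FLOW_MOD1 template above, a batched request body presumably just carries several entries in its "flow" list. The Python 2 sketch below illustrates that idea only; make_flow() and make_batch() are hypothetical helpers, trimmed to a few of the template's fields, and are not FlowConfigBlaster's actual implementation:

    import json

    def make_flow(flow_id):
        # Trimmed flow-node-inventory structure, following the removed template
        return {
            "flow-node-inventory:id": str(flow_id),
            "flow-node-inventory:table_id": 0,
            "flow-node-inventory:priority": 2,
            "flow-node-inventory:match": {
                "flow-node-inventory:ipv4-destination": "10.0.0.%d/32" % (flow_id % 255),
                "flow-node-inventory:ethernet-match": {
                    "flow-node-inventory:ethernet-type": {"flow-node-inventory:type": 2048}
                }
            }
        }

    def make_batch(start_id, fpr):
        # One HTTP request body carrying <fpr> flows in a single "flow" list
        flows = [make_flow(i) for i in range(start_id, start_id + fpr)]
        return json.dumps({"flow-node-inventory:flow": flows}, indent=4)

    print make_batch(1, 2)   # --fpr 2: two flows per request

The point of the option is to let the test measure throughput at different batch sizes, since larger batches amortize per-request HTTP overhead.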
@@ -97,69 +100,47 @@ if __name__ == "__main__":
     if in_args.file != '':
         flow_template = get_json_from_file(in_args.file)
     else:
-        flow_template = JSON_FLOW_MOD1
+        flow_template = None
 
     ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
 
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
-                            in_args.flows, in_args.startflow, in_args.auth, flow_template)
-
-    # Get baseline stats
+    # The hard-coded 16 is the node count (the default of the removed --nodes option)
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
+                            16, in_args.flows, in_args.startflow, in_args.auth)
+    # Get the baseline stats. Required in Step 3 to validate whether the delete
+    # function gets the controller back to the baseline
     ic.crawl_inventory()
     reported = ic.reported_flows
     found = ic.found_flows
 
     print 'Baseline:'
-    print '   Reported nodes: %d' % reported
-    print '   Found nodes:    %d' % found
+    print '   Reported flows: %d' % reported
+    print '   Found flows:    %d' % found
 
-    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+    # Run through <cycles> add cycles, where <threads> threads are started in
+    # each cycle and <flows> flows are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_total_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+    print '\n*** Total flows added: %d' % fct.get_ok_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
 
     # Wait for stats to catch up
-    total_delay = 0
-    exp_found = found + fct.get_ok_flows()
-    exp_reported = reported + fct.get_ok_flows()
-
-    print 'Waiting for stats to catch up:'
-    while True:
-        ic.crawl_inventory()
-        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
-        if ic.found_flows == exp_found or total_delay > in_args.timeout:
-            break
-        total_delay += in_args.delay
-        time.sleep(in_args.delay)
+    wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
 
-    if total_delay < in_args.timeout:
-        print 'Stats collected in %d seconds.' % total_delay
-    else:
-        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
-
-    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
+    # Run through <cycles> delete cycles, where <threads> threads are started
+    # in each cycle and <flows> flows previously added in an add cycle are
    # deleted in each thread
     if in_args.bulk_delete:
-        print '\nDeleting all flows in bulk:\n   ',
-        cleanup_config(in_args.host, in_args.port, in_args.auth)
+        print '\nDeleting all flows in bulk:'
+        sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
+        if sts != 200:
+            print '   Failed to delete flows, code %d' % sts
+        else:
+            print '   All flows deleted.'
     else:
         print '\nDeleting flows one by one\n   ',
         fct.delete_blaster()
+        print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
+        print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
 
-    # Wait for stats to catch up
-    total_delay = 0
-
-    print '\nWaiting for stats to catch up:'
-    while True:
-        ic.crawl_inventory()
-        if ic.found_flows == found or total_delay > in_args.timeout:
-            break
-        total_delay += in_args.delay
-        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
-        time.sleep(in_args.delay)
-
-    if total_delay < in_args.timeout:
-        print 'Stats collected in %d seconds.' % total_delay
-    else:
-        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
+    # Wait for stats to catch up back to the baseline
+    wait_for_stats(ic, found, in_args.timeout, in_args.delay)
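The main cleanup in this change is that two near-identical polling loops collapse into the single wait_for_stats() helper, and the bulk-delete path now checks the HTTP status returned by cleanup_config_odl() instead of ignoring it. The Python 2 snippet below exercises the new helper against a stub to show its poll-until-expected-or-timeout behavior; StubCrawler is a hypothetical stand-in for InventoryCrawler, and the import assumes the clustering-performance-test directory is on the Python path:

    from flow_add_delete_test import wait_for_stats

    class StubCrawler(object):
        # Stand-in for InventoryCrawler: stats "catch up" by 40 flows per poll
        def __init__(self, target):
            self.reported_flows = 0
            self.found_flows = 0
            self._target = target

        def crawl_inventory(self):
            self.found_flows = min(self.found_flows + 40, self._target)
            self.reported_flows = self.found_flows

    # Expect 100 flows; poll every second, give up after 10 seconds
    wait_for_stats(StubCrawler(100), exp_found=100, timeout=10, delay=1)

With these values the stub reaches 100 flows on the third poll, so the run prints three count lines and then 'Stats collected in 2 seconds.' Note that the helper deliberately keeps polling past the timeout check only until the next comparison, matching the behavior of the loops it replaces.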