From: Peter Gubka
Date: Thu, 4 Jun 2015 09:18:15 +0000 (+0200)
Subject: adding a statistic monitor script + minor changes
X-Git-Tag: release/lithium~54
X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=commitdiff_plain;ds=sidebyside;h=a8ca12cfaa267cca247b366277761b9083cbe3b9;p=integration%2Ftest.git

adding a statistic monitor script + minor changes

flow_add_delete_test.py - fixed output message
inventory_crawler.py - fixed a problem where one crawler instance, when run
more than once, kept accumulating node counts across runs

Change-Id: I3f59031349c18e1ed0e995e8a1d62c3a92512e2f
Signed-off-by: Peter Gubka
---

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
index 8eeca02e98..8f7215cbad 100755
--- a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
@@ -113,8 +113,8 @@ if __name__ == "__main__":
     found = ic.found_flows
 
     print 'Baseline:'
-    print '   Reported nodes: %d' % reported
-    print '   Found nodes:    %d' % found
+    print '   Reported flows: %d' % reported
+    print '   Found flows:    %d' % found
 
     # Run through <cycles> add cycles, where <threads> threads are started in
     # each cycle and <flows> flows are added from each thread
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
new file mode 100755
index 0000000000..04a9fc3a80
--- /dev/null
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+"""
+This script is based on flow_add_delete_test.py. The only difference is that
+it does not wait until stats are collected; instead it polls the inventory
+for as long as specified and writes the collected data to an output file
+whose name is given on the command line.
+"""
+
+import argparse
+import time
+from flow_config_blaster import FlowConfigBlaster, get_json_from_file
+from inventory_crawler import InventoryCrawler
+from config_cleanup import cleanup_config_odl
+
+
+def get_time_delta(actualtime, basetime):
+    return actualtime - basetime
+
+
+def monitor_stats(crawler, monitortime, period):
+    """
+    Crawls the inventory and yields the collected data.
+    """
+    basetime = time.time()
+    while True:
+        lastcrawl = time.time()
+        crawler.nodes = 0
+        crawler.crawl_inventory()
+        actualtime = time.time()
+        yield (actualtime, crawler.nodes, crawler.reported_flows, crawler.found_flows)
+        if actualtime > basetime + monitortime:
+            break
+        # never sleep a negative amount of time when the crawl itself
+        # took longer than the polling period
+        time.sleep(max(0, period - get_time_delta(actualtime, lastcrawl)))
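+
+
+# A usage note (values here are hypothetical): with monitortime=60 and
+# period=10 the generator yields roughly one sample every 10 seconds for
+# about a minute, e.g.
+#
+#     for ts, nodes, reported, found in monitor_stats(ic, 60, 10):
+#         print ts, nodes, reported, found
+#
+# Each sample is (timestamp, node count, reported flows, found flows).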
+
+
+if __name__ == "__main__":
+    ############################################################################
+    # This program executes an ODL performance test. The test is executed in
+    # four steps:
+    #
+    # 1. The specified number of flows is added in the 'add cycle' (uses
+    #    flow_config_blaster to blast flows)
+    # 2. The network is polled for flow statistics (using the
+    #    inventory_crawler.py script) to make sure that all flows have been
+    #    properly programmed into the network and the ODL statistics collector
+    #    can properly read them, for as long as specified
+    # 3. The flows are deleted in the 'delete cycle'. Deletion happens either
+    #    in bulk (using the config_cleanup script) or one by one (using the
+    #    flow_config_blaster 'delete' method)
+    # 4. Same as 2.: the state of the inventory data is monitored and
+    #    reported for a specified period of time.
+    ############################################################################
+
+    parser = argparse.ArgumentParser(description='Flow programming performance test: first adds and then deletes flows '
+                                                 'in the config tree, as specified by optional parameters.')
+
+    parser.add_argument('--host', default='127.0.0.1',
+                        help='Host where odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--port', default='8181',
+                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
+    parser.add_argument('--cycles', type=int, default=1,
+                        help='Number of flow add/delete cycles; default 1. Both flow adds and flow deletes are '
+                             'performed in cycles. <threads> worker threads are started in each cycle and the cycle '
+                             'ends when all threads finish. Another cycle is started when the previous cycle finishes.')
+    parser.add_argument('--threads', type=int, default=1,
+                        help='Number of request worker threads to start in each cycle; default=1. '
+                             'Each thread will add/delete <flows> flows.')
+    parser.add_argument('--flows', type=int, default=10,
+                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
+                             'default 10')
+    parser.add_argument('--fpr', type=int, default=1,
+                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+                             'default 1')
+    parser.add_argument('--delay', type=int, default=2,
+                        help='Time (seconds) between inventory polls when waiting for stats to catch up; default=2')
+    parser.add_argument('--timeout', type=int, default=100,
+                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
+    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
+                        help='Delete all added flows one by one, benchmark delete '
+                             'performance.')
+    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
+                        help='Delete all flows in bulk; default=False')
+    parser.add_argument('--auth', dest='auth', action='store_true',
+                        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False")
+    parser.add_argument('--startflow', type=int, default=0,
+                        help='The starting Flow ID; default=0')
+    parser.add_argument('--file', default='',
+                        help='File from which to read the JSON flow template; default: no file, use a built-in '
+                             'template.')
+    parser.add_argument('--config_monitor', type=int, default=60,
+                        help='Time (seconds) to monitor the inventory after flows are configured; default=60')
+    parser.add_argument('--deconfig_monitor', type=int, default=60,
+                        help='Time (seconds) to monitor the inventory after flows are deconfigured; default=60')
+    parser.add_argument('--monitor_period', type=int, default=10,
+                        help='Period (seconds) between inventory crawls while monitoring; default=10')
+    parser.add_argument('--monitor_outfile', default=None, help='Output file (if specified)')
+
+    in_args = parser.parse_args()
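+
+    # Illustrative invocation (all values are hypothetical):
+    #     ./flow_stats_stability_monitor.py --flows 1000 --threads 5 \
+    #         --config_monitor 120 --monitor_period 10 --monitor_outfile stats.log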
+
+    # Initialize
+    if in_args.file != '':
+        flow_template = get_json_from_file(in_args.file)
+    else:
+        flow_template = None
+
+    ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
+
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
+                            16, in_args.flows, in_args.startflow, in_args.auth)
+    # Get the baseline stats. Required in Step 3 to validate whether the
+    # delete function has brought the controller back to the baseline
+    ic.crawl_inventory()
+    reported = ic.reported_flows
+    found = ic.found_flows
+
+    print 'Baseline:'
+    print '   Reported flows: %d' % reported
+    print '   Found flows:    %d' % found
+
+    stats = []
+    stats.append((time.time(), ic.nodes, ic.reported_flows, ic.found_flows))
+    # Run through <cycles> add cycles, where <threads> threads are started in
+    # each cycle and <flows> flows are added from each thread
+    fct.add_blaster()
+
+    print '\n*** Total flows added: %d' % fct.get_ok_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+
+    # Monitor the stats and save the results in the list
+    for stat_item in monitor_stats(ic, in_args.config_monitor, in_args.monitor_period):
+        print stat_item
+        stats.append(stat_item)
+
+    # Run through <cycles> delete cycles, where <threads> threads are started
+    # in each cycle and flows previously added in an add cycle are
+    # deleted in each thread
+    if in_args.bulk_delete:
+        print '\nDeleting all flows in bulk:'
+        sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
+        if sts != 200:
+            print '   Failed to delete flows, code %d' % sts
+        else:
+            print '   All flows deleted.'
+    else:
+        print '\nDeleting flows one by one\n   ',
+        fct.delete_blaster()
+        print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
+        print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+
+    # Monitor the stats and append them to the list
+    for stat_item in monitor_stats(ic, in_args.deconfig_monitor, in_args.monitor_period):
+        print stat_item
+        stats.append(stat_item)
+
+    # If requested, write the collected data into the output file
+    if in_args.monitor_outfile is not None:
+        with open(in_args.monitor_outfile, 'wt') as fd:
+            for e in stats:
+                fd.write('{0} {1} {2} {3}\n'.format(e[0], e[1], e[2], e[3]))
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
index 6d1dd8b005..2712314b39 100755
--- a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
@@ -107,6 +107,7 @@ class InventoryCrawler(object):
         """
         Collects and prints summary information about all openflow nodes in a data store (either operational or config)
         """
+        self.nodes = 0
         self.found_flows = 0
         self.reported_flows = 0
         self.table_stats_unavailable = 0
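A minimal sketch for reading back the data written via --monitor_outfile,
assuming the four space-separated columns produced above (timestamp, node
count, reported flows, found flows); the file name 'stats.log' is
hypothetical:

    #!/usr/bin/python

    # Print each sample with its time offset from the first sample.
    # Columns in the input file: timestamp nodes reported_flows found_flows
    with open('stats.log') as fd:
        samples = [[float(c) for c in line.split()] for line in fd if line.strip()]

    base = samples[0][0]
    for ts, nodes, reported, found in samples:
        print '%8.1fs  nodes=%d  reported=%d  found=%d' % (ts - base, nodes, reported, found)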