Merge "test to configure 100k flows"
[integration/test.git] test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
#!/usr/bin/python

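"""Flow add/delete performance test for the OpenDaylight (ODL) controller.

Adds flows to the controller's config data store through RESTCONF, waits for
the operational data store (flow statistics) to catch up, and then deletes the
flows again, either one by one or in bulk.

Example invocation (illustrative only; all options have defaults, see --help):
    ./flow_add_delete_test.py --host 127.0.0.1 --port 8181 --threads 5 --flows 1000
"""
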
__author__ = "Jan Medved"
__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
__license__ = "New-style BSD"
__email__ = "jmedved@cisco.com"

import argparse
import time
from flow_config_blaster import FlowConfigBlaster, get_json_from_file
from inventory_crawler import InventoryCrawler
from config_cleanup import cleanup_config_odl


def wait_for_stats(crawler, exp_found, timeout, delay):
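    """Poll the inventory until the expected number of flows is found.

    Re-crawls the operational inventory every <delay> seconds, printing the
    reported and found flow counts, until the number of found flows equals
    exp_found or the accumulated wait exceeds <timeout> seconds.
    """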
    total_delay = 0
    print 'Waiting for stats to catch up:'
    while True:
        crawler.crawl_inventory()
        print '   %d, %d' % (crawler.reported_flows, crawler.found_flows)
        if crawler.found_flows == exp_found or total_delay > timeout:
            break
        total_delay += delay
        time.sleep(delay)

    if crawler.found_flows == exp_found:
        print 'Stats collected in %d seconds.' % total_delay
    else:
        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay


if __name__ == "__main__":

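    # Default flow template. The positional format specifiers are filled in at
    # flow-programming time (presumably by FlowConfigBlaster): cookie (%d),
    # flow name (%s), hard timeout (%d), flow id (%s), idle timeout (%d) and
    # the IPv4 destination address of the match (%s).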
    JSON_FLOW_MOD1 = '''{
        "flow-node-inventory:flow": [
            {
                "flow-node-inventory:cookie": %d,
                "flow-node-inventory:cookie_mask": 4294967295,
                "flow-node-inventory:flow-name": "%s",
                "flow-node-inventory:hard-timeout": %d,
                "flow-node-inventory:id": "%s",
                "flow-node-inventory:idle-timeout": %d,
                "flow-node-inventory:installHw": false,
                "flow-node-inventory:instructions": {
                    "flow-node-inventory:instruction": [
                        {
                            "flow-node-inventory:apply-actions": {
                                "flow-node-inventory:action": [
                                    {
                                        "flow-node-inventory:drop-action": {},
                                        "flow-node-inventory:order": 0
                                    }
                                ]
                            },
                            "flow-node-inventory:order": 0
                        }
                    ]
                },
                "flow-node-inventory:match": {
                    "flow-node-inventory:ipv4-destination": "%s/32",
                    "flow-node-inventory:ethernet-match": {
                        "flow-node-inventory:ethernet-type": {
                            "flow-node-inventory:type": 2048
                        }
                    }
                },
                "flow-node-inventory:priority": 2,
                "flow-node-inventory:strict": false,
                "flow-node-inventory:table_id": 0
            }
        ]
    }'''

    parser = argparse.ArgumentParser(description='Flow programming performance test: first adds flows to the config '
                                                 'tree and then deletes them, as specified by optional parameters.')
76
77     parser.add_argument('--host', default='127.0.0.1',
78                         help='Host where odl controller is running (default is 127.0.0.1)')
79     parser.add_argument('--port', default='8181',
80                         help='Port on which odl\'s RESTCONF is listening (default is 8181)')
    parser.add_argument('--cycles', type=int, default=1,
                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
                             'ends when all threads finish. Another cycle is started when the previous cycle finishes.')
    parser.add_argument('--threads', type=int, default=1,
                        help='Number of request worker threads to start in each cycle; default=1. '
                             'Each thread will add/delete <FLOWS> flows.')
    parser.add_argument('--flows', type=int, default=10,
                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
                             'default 10')
    parser.add_argument('--nodes', type=int, default=16,
                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
                             'flows will be evenly distributed (programmed) across the connected nodes.')
    parser.add_argument('--delay', type=int, default=2,
                        help='Time (in seconds) between inventory polls when waiting for stats to catch up; default=2')
    parser.add_argument('--timeout', type=int, default=100,
                        help='The maximum time (in seconds) to wait for stats to catch up after the add and delete '
                             'cycles; default=100')
    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
                        help='Delete all added flows one by one, benchmark delete '
                             'performance.')
    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
                        help='Delete all flows in bulk; default=False')
    parser.add_argument('--auth', dest='auth', action='store_true',
                        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False")
    parser.add_argument('--startflow', type=int, default=0,
                        help='The starting Flow ID; default=0')
    parser.add_argument('--file', default='',
                        help='File from which to read the JSON flow template; default: no file, use a built-in '
                             'template.')

    in_args = parser.parse_args()

    # Initialize
    if in_args.file != '':
        flow_template = get_json_from_file(in_args.file)
    else:
        flow_template = JSON_FLOW_MOD1

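    # The crawler counts flows in the 'operational' data store (i.e. flows for which
    # statistics have been reported); the blaster adds and deletes flows in the
    # config data store.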
    ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)

    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
                            in_args.flows, in_args.startflow, in_args.auth, flow_template)

    # Get baseline stats
    ic.crawl_inventory()
    reported = ic.reported_flows
    found = ic.found_flows

    print 'Baseline:'
    print '   Reported flows: %d' % reported
    print '   Found flows:    %d' % found

    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
    fct.add_blaster()

    print '\n*** Total flows added: %s' % fct.get_total_flows()
    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()

    # Wait for stats to catch up
    wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)

    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
    # deleted in each thread
    if in_args.bulk_delete:
        print '\nDeleting all flows in bulk:'
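        # cleanup_config_odl is expected to delete the entire flow configuration with a
        # single request and to return the HTTP status code of that request.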
        sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
        if sts != 200:
            print '   Failed to delete flows, code %d' % sts
        else:
            print '   All flows deleted.'
    else:
        print '\nDeleting flows one by one\n   ',
        fct.delete_blaster()

    # Wait for stats to catch up
    wait_for_stats(ic, found, in_args.timeout, in_args.delay)