- Put duplicate code in flow_add_delete_test into a common function
author    Jan Medved <jmedved@cisco.com>
          Thu, 16 Oct 2014 17:16:18 +0000 (10:16 -0700)
committer Jan Medved <jmedved@cisco.com>
          Thu, 16 Oct 2014 17:17:30 +0000 (10:17 -0700)
- Added support for Floodlight in config_cleanup

Change-Id: I6f7e07e05138d4cd44195f8c4d8385aad6fe64f5
Signed-off-by: Jan Medved <jmedved@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py

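In short, the commit replaces the single cleanup_config() helper with two controller-specific variants that return the HTTP status code instead of printing it, and hoists the duplicated stats-polling loop out of the test's main block into wait_for_stats(). A sketch of the resulting public surface (signatures and expected success codes taken from the diff below):

# Helpers introduced or renamed by this commit (signatures as in the diff below).

def cleanup_config_odl(host, port, auth):
    """DELETE restconf/config/opendaylight-inventory:nodes on an ODL
    controller; returns the HTTP status code (200 expected on success)."""

def cleanup_config_fl(host, port):
    """GET /wm/staticflowentrypusher/clear/all/json on a Floodlight
    controller; returns the HTTP status code (204 expected on success)."""

def wait_for_stats(crawler, exp_found, timeout, delay):
    """Poll crawler.crawl_inventory() every <delay> seconds until
    crawler.found_flows == exp_found or <timeout> seconds elapse."""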
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
index 56e1bd75c6ec300f5206c3e5aad0567ffdbdafa7..128c782e817653cb2f3a1633ce874da866300e08 100755
--- a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
@@ -6,26 +6,29 @@ __email__ = "jmedved@cisco.com"
 
 import argparse
 import requests
+import sys
 
+getheaders = {'Accept': 'application/json'}
 
-def cleanup_config(host, port, auth):
-    CONFIGURL = 'restconf/config/opendaylight-inventory:nodes'
-    getheaders = {'Accept': 'application/json'}
+def cleanup_config_fl(host, port):
+    global getheaders
 
-    url = 'http://' + host + ":" + port + '/' + CONFIGURL
-    s = requests.Session()
+    url = 'http://' + host + ":" + port + '/wm/staticflowentrypusher/clear/all/json'
+    r = requests.get(url, headers=getheaders)
+    return r.status_code
 
-    if not auth:
-        r = s.delete(url, headers=getheaders)
-    else:
-        r = s.delete(url, headers=getheaders, auth=('admin', 'admin'))
 
-    s.close()
+def cleanup_config_odl(host, port, auth):
+    global getheaders
+
+    url = 'http://' + host + ":" + port + '/restconf/config/opendaylight-inventory:nodes'
 
-    if r.status_code != 200:
-        print 'Failed to delete nodes in the config space, code %d' % r.status_code
+    if not auth:
+        r = requests.delete(url, headers=getheaders)
     else:
-        print 'Nodes in config space deleted.'
+        r = requests.delete(url, headers=getheaders, auth=('admin', 'admin'))
+
+    return r.status_code
 
 
 if __name__ == "__main__":
@@ -38,6 +41,23 @@ if __name__ == "__main__":
     parser.add_argument('--auth', dest='auth', action='store_true', default=False,
                         help="Use authenticated access to REST "
                         "(username: 'admin', password: 'admin').")
+    parser.add_argument('--controller', choices=['odl', 'floodlight'], default='odl',
+                         help='Controller type (ODL or Floodlight); default odl (OpenDaylight)')
 
     in_args = parser.parse_args()
-    cleanup_config(in_args.host, in_args.port, in_args.auth)
+
+    if in_args.controller == 'odl':
+        sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
+        exp = 200
+    elif in_args.controller == 'floodlight':
+        sts = cleanup_config_fl(in_args.host, in_args.port)
+        exp = 204
+    else:
+        print 'Unknown controller type'
+        sys.exit(-1)
+
+    if sts != exp:
+        print 'Failed to delete nodes in the config space, code %d' % sts
+    else:
+        print 'Nodes in config space deleted.'
+
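A minimal usage sketch of the two cleanup helpers; the host and port values are illustrative, not from the commit (8181 and 8080 are common defaults for ODL RESTCONF and Floodlight, respectively):

# Hypothetical driver for the refactored helpers; host/port are illustrative.
from config_cleanup import cleanup_config_odl, cleanup_config_fl

# ODL: RESTCONF DELETE on the config inventory; 200 signals success.
sts = cleanup_config_odl('127.0.0.1', '8181', True)
print 'ODL cleanup status: %d' % sts

# Floodlight: GET on the static flow pusher "clear all" URL; 204 signals success.
sts = cleanup_config_fl('127.0.0.1', '8080')
print 'Floodlight cleanup status: %d' % sts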
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
index e191a15b62b922c98e5934af4ac0755f67fbe8d3..f8416f2f9be236c616a3e0411015912b43c67c73 100755
--- a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
+++ b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
@@ -9,7 +9,24 @@ import argparse
 import time
 from flow_config_blaster import FlowConfigBlaster, get_json_from_file
 from inventory_crawler import InventoryCrawler
-from config_cleanup import cleanup_config
+from config_cleanup import cleanup_config_odl
+
+
+def wait_for_stats(crawler, exp_found, timeout, delay):
+    total_delay = 0
+    print 'Waiting for stats to catch up:'
+    while True:
+        crawler.crawl_inventory()
+        print '   %d, %d' % (crawler.reported_flows, crawler.found_flows)
+        if crawler.found_flows == exp_found or total_delay > timeout:
+            break
+        total_delay += delay
+        time.sleep(delay)
+
+    if total_delay < timeout:
+        print 'Stats collected in %d seconds.' % total_delay
+    else:
+        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
 
 
 if __name__ == "__main__":
@@ -18,7 +35,7 @@ if __name__ == "__main__":
         "flow-node-inventory:flow": [
             {
                 "flow-node-inventory:cookie": %d,
-                "flow-node-inventory:cookie_mask": 65535,
+                "flow-node-inventory:cookie_mask": 4294967295,
                 "flow-node-inventory:flow-name": "%s",
                 "flow-node-inventory:hard-timeout": %d,
                 "flow-node-inventory:id": "%s",
@@ -120,46 +137,20 @@ if __name__ == "__main__":
     print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
 
     # Wait for stats to catch up
-    total_delay = 0
-    exp_found = found + fct.get_ok_flows()
-    exp_reported = reported + fct.get_ok_flows()
-
-    print 'Waiting for stats to catch up:'
-    while True:
-        ic.crawl_inventory()
-        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
-        if ic.found_flows == exp_found or total_delay > in_args.timeout:
-            break
-        total_delay += in_args.delay
-        time.sleep(in_args.delay)
-
-    if total_delay < in_args.timeout:
-        print 'Stats collected in %d seconds.' % total_delay
-    else:
-        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
+    wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
     # deleted in each thread
     if in_args.bulk_delete:
-        print '\nDeleting all flows in bulk:\n   ',
-        cleanup_config(in_args.host, in_args.port, in_args.auth)
+        print '\nDeleting all flows in bulk:'
+        sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
+        if sts != 200:
+            print '   Failed to delete flows, code %d' % sts
+        else:
+            print '   All flows deleted.'
     else:
         print '\nDeleting flows one by one\n   ',
         fct.delete_blaster()
 
     # Wait for stats to catch up
-    total_delay = 0
-
-    print '\nWaiting for stats to catch up:'
-    while True:
-        ic.crawl_inventory()
-        if ic.found_flows == found or total_delay > in_args.timeout:
-            break
-        total_delay += in_args.delay
-        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
-        time.sleep(in_args.delay)
-
-    if total_delay < in_args.timeout:
-        print 'Stats collected in %d seconds.' % total_delay
-    else:
-        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
+    wait_for_stats(ic, found, in_args.timeout, in_args.delay)
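
And a small sketch exercising wait_for_stats() on its own, assuming flow_add_delete_test and its sibling modules are importable; the FakeCrawler stub is hypothetical and only mimics the two attributes and the one method the helper actually touches:

# Hypothetical stub standing in for InventoryCrawler; only the attributes
# and method used by wait_for_stats() are provided.
class FakeCrawler(object):
    def __init__(self, target_flows):
        self.reported_flows = 0
        self.found_flows = 0
        self._target = target_flows

    def crawl_inventory(self):
        # Pretend the controller's stats converge a bit more on each poll.
        self.found_flows = min(self.found_flows + 40, self._target)
        self.reported_flows = self.found_flows

from flow_add_delete_test import wait_for_stats
wait_for_stats(FakeCrawler(100), 100, timeout=10, delay=1)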