Added FlowConfigBlaster 'Floodlight Edition' - a flow config blaster that uses Floodlight's Static Flow Entry Pusher REST API to inject flows
author    Jan Medved <jmedved@cisco.com>
Wed, 15 Oct 2014 02:48:07 +0000 (19:48 -0700)
committer Jan Medved <jmedved@cisco.com>
Wed, 15 Oct 2014 02:49:25 +0000 (19:49 -0700)
Change-Id: I9572137ae12aa39d82859bd3742194bcb00669c5
Signed-off-by: Jan Medved <jmedved@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/README
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py [new file with mode: 0755]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py [new file with mode: 0755]

index cd5161b84a15493f56a026240a24f8e39f292708..6246ee9b32799e1163eafd32f30e980380e0b3c7 100644 (file)
@@ -22,6 +22,10 @@ The test suite contains the following scripts:
   flow_config_blaster.py provides the FlowConfigBlaster class that is 
   reusable in other tests, such as in flow_add_delete_test.py
 
+- flow_config_blaster_fle.py:
+  "FlowConfigBlaster Floodlight Edition" - the same as flow_config_blaster,
+  but for the Floodlight controller.
+
 - config_cleanup.py:
   Cleans up the config data store by deleting the entire inventory.
 
@@ -227,19 +231,23 @@ optional arguments:
                         127.0.0.1)
   --port PORT           Port on which odl's RESTCONF is listening (default is
                         8181)
-  --flows FLOWS         Number of flow add/delete requests to send in each
-                        cycle; default 10
-  --cycles CYCLES       Number of flow add/delete cycles to send in each
-                        thread; default 1
-  --threads THREADS     Number of request worker threads, default=1. Each
-                        thread will add/delete FLOWS flows.
+  --cycles CYCLES       Number of flow add/delete cycles; default 1. Both Flow
+                        Adds and Flow Deletes are performed in cycles.
+                        <THREADS> worker threads are started in each cycle and
+                        the cycle ends when all threads finish. Another cycle
+                        starts when the previous cycle has finished.
+  --threads THREADS     Number of request worker threads to start in each
+                        cycle; default=1. Each thread will add/delete <FLOWS>
+                        flows.
+  --flows FLOWS         Number of flows that will be added/deleted by each
+                        worker thread in each cycle; default 10
   --nodes NODES         Number of nodes if mininet is not connected;
                         default=16. If mininet is connected, flows will be
                         evenly distributed (programmed) into connected nodes.
-  --delay DELAY         Time to wait between the add and delete cycles;
-                        default=0
-  --timeout TIMEOUT     The maximum time to wait between the add and delete
-                        cycles; default=100
+  --delay DELAY         Time (seconds) between inventory polls when waiting
+                        for stats to catch up; default=1
+  --timeout TIMEOUT     The maximum time (seconds) to wait between the add and
+                        delete cycles; default=100
   --delete              Delete all added flows one by one, benchmark delete
                         performance.
   --bulk-delete         Delete all flows in bulk; default=False
@@ -247,3 +255,11 @@ optional arguments:
                         password: 'admin'); default=False
   --startflow STARTFLOW
                         The starting Flow ID; default=0
+  --file FILE           File from which to read the JSON flow template;
+                        default: no file, use a built-in template.
+
+Examples:
+---------
+To add 5000 flows to ODL, wait for stats to catch up, and then delete the
+flows in bulk (using config_cleanup):
+   > ./flow_add_delete_test.py --flows=5000 --auth --no-delete --bulk-delete
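+
+To add 1000 flows (2 cycles x 5 threads x 100 flows per thread) to a
+Floodlight controller with the new flow_config_blaster_fle.py and skip the
+delete cycle (an illustrative invocation; it assumes Floodlight's REST API is
+listening on its default port 8080):
+   > ./flow_config_blaster_fle.py --cycles=2 --threads=5 --flows=100 --no-delete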
index 0002b23f27a1da92b0b7cdd1ba5dae8f095dc372..56e1bd75c6ec300f5206c3e5aad0567ffdbdafa7 100755 (executable)
@@ -31,15 +31,13 @@ def cleanup_config(host, port, auth):
 if __name__ == "__main__":
 
     parser = argparse.ArgumentParser(description='Cleans up the config space')
-    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
+    parser.add_argument('--host', default='127.0.0.1', help='host where '
                         'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--odlport', default='8181', help='port on '
+    parser.add_argument('--port', default='8181', help='port on '
                         'which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--no-auth', dest='auth', action='store_false', default=False,
-                        help="Do not use authenticated access to REST (default)")
-    parser.add_argument('--auth', dest='auth', action='store_true',
+    parser.add_argument('--auth', dest='auth', action='store_true', default=False,
                         help="Use authenticated access to REST "
                         "(username: 'admin', password: 'admin').")
 
     in_args = parser.parse_args()
-    cleanup_config(in_args.odlhost, in_args.odlport, in_args.auth)
+    cleanup_config(in_args.host, in_args.port, in_args.auth)
index a541c8fa844dc926e5f4818e26218a9862af87e2..e191a15b62b922c98e5934af4ac0755f67fbe8d3 100755 (executable)
@@ -61,20 +61,23 @@ if __name__ == "__main__":
                         help='Host where odl controller is running (default is 127.0.0.1)')
     parser.add_argument('--port', default='8181',
                         help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flow add/delete requests to send in each cycle; default 10')
     parser.add_argument('--cycles', type=int, default=1,
-                        help='Number of flow add/delete cycles to send in each thread; default 1')
+                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
+                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
+                             'ends when all threads finish. Another cycle starts when the previous cycle has finished.')
     parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads, default=1. '
-                             'Each thread will add/delete nflows.')
+                        help='Number of request worker threads to start in each cycle; default=1. '
+                             'Each thread will add/delete <FLOWS> flows.')
+    parser.add_argument('--flows', type=int, default=10,
+                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
+                             'default 10')
     parser.add_argument('--nodes', type=int, default=16,
                         help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
                              'flows will be evenly distributed (programmed) into connected nodes.')
     parser.add_argument('--delay', type=int, default=2,
-                        help='Time to wait between the add and delete cycles; default=0')
+                        help='Time (seconds) between inventory polls when waiting for stats to catch up; default=1')
     parser.add_argument('--timeout', type=int, default=100,
-                        help='The maximum time to wait between the add and delete cycles; default=100')
+                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
     parser.add_argument('--delete', dest='delete', action='store_true', default=True,
                         help='Delete all added flows one by one, benchmark delete '
                              'performance.')
index 9a1ae165f69a16f9bb98d574b305d1825b6912ca..19dfed7c610b3171237b4f8c57181c40370c7720 100755 (executable)
@@ -7,10 +7,11 @@ __email__ = "jmedved@cisco.com"
 from random import randrange
 import json
 import argparse
-import requests
 import time
 import threading
 import re
+
+import requests
 import netaddr
 
 
@@ -74,7 +75,6 @@ class FlowConfigBlaster(object):
 
         self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
 
-
         self.print_lock = threading.Lock()
         self.cond = threading.Condition()
         self.threads_done = 0
@@ -111,12 +111,12 @@ class FlowConfigBlaster(object):
         return nodes
 
 
-    def add_flow(self, session, tid, node, flow_id, ipaddr):
+    def add_flow(self, session, node, flow_id, ipaddr):
         """
         Adds a single flow to the config data store via REST
         """
-        flow_data = self.json_template % (tid + flow_id, 'TestFlow-%d' % flow_id, 65000,
-                                          str(flow_id), 65000, str(netaddr.IPAddress(ipaddr)))
+        flow_data = self.json_template % (flow_id, 'TestFlow-%d' % flow_id, 65000, str(flow_id), 65000,
+                                          str(netaddr.IPAddress(ipaddr)))
         # print flow_data
         flow_url = self.url_template % (node, flow_id)
         # print flow_url
@@ -148,7 +148,7 @@ class FlowConfigBlaster(object):
                 node_id = randrange(1, n_nodes + 1)
                 flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
                 self.flows[tid][flow_id] = node_id
-                sts = self.add_flow(s, tid, node_id, flow_id, self.ip_addr.increment())
+                sts = self.add_flow(s, node_id, flow_id, self.ip_addr.increment())
                 try:
                     add_res[sts] += 1
                 except KeyError:
@@ -179,12 +179,6 @@ class FlowConfigBlaster(object):
     def delete_flow(self, session, node, flow_id):
         """
         Deletes a single flow from the ODL config data store via REST
-
-        :param session:
-        :param url_template:
-        :param node:
-        :param flow_id:
-        :return:
         """
         flow_url = self.url_template % (node, flow_id)
 
@@ -264,7 +258,7 @@ class FlowConfigBlaster(object):
 
             with self.print_lock:
                 print '    Total success rate: %.2f, Total rate: %.2f' % (
-                      self.ok_rate.value, self.total_rate.value)
+                    self.ok_rate.value, self.total_rate.value)
                 measured_rate = self.nthreads * self.nflows * self.ncycles / t.secs
                 print '    Measured rate:      %.2f (%.2f%% of Total success rate)' % \
                       (measured_rate, measured_rate / self.total_rate.value * 100)
@@ -299,7 +293,7 @@ if __name__ == "__main__":
         "flow-node-inventory:flow": [
             {
                 "flow-node-inventory:cookie": %d,
-                "flow-node-inventory:cookie_mask": 65535,
+                "flow-node-inventory:cookie_mask": 4294967295,
                 "flow-node-inventory:flow-name": "%s",
                 "flow-node-inventory:hard-timeout": %d,
                 "flow-node-inventory:id": "%s",
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py
new file mode 100755 (executable)
index 0000000..429294b
--- /dev/null
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+from flow_config_blaster import FlowConfigBlaster
+import argparse
+import netaddr
+import time
+import json
+
+class FlowConfigBlasterFLE(FlowConfigBlaster):
+    """
+    FlowConfigBlaster, Floodlight Edition: uses the Floodlight Static Flow Entry Pusher REST API to inject flows.
+    """
+    flow = {
+        'switch': "00:00:00:00:00:00:00:01",
+        "name": "flow-mod",
+        "cookie": "0",
+        "priority": "32768",
+        "ether-type": "2048",
+        "dst-ip": "10.0.0.1/32",
+        "active": "true",
+        "actions": "output=flood"
+    }
+
+    def __init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow):
+        FlowConfigBlaster.__init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow, False, '')
+
+        # Create the service URL
+        self.url = 'http://' + self.host + ":" + self.port + '/wm/staticflowentrypusher/json'
+
+
+    def get_num_nodes(self, session):
+        """
+        Determines the number of nodes in the network. Overrides the get_num_nodes method in FlowConfigBlaster.
+        :param session:
+        :return:
+        """
+        url = 'http://' + self.host + ":" + self.port + '/wm/core/controller/switches/json'
+        nodes = self.nnodes
+
+        r = session.get(url, headers=self.getheaders, stream=False)
+
+        if r.status_code == 200:
+            try:
+                nodes = len(json.loads(r.content))
+            except ValueError:
+                pass
+
+        return nodes
+
+
+    def add_flow(self, session, node, flow_id, ipaddr):
+        """
+        Adds a flow. Overrides the add_flow method in FlowConfigBlaster.
+        :param session:
+        :param node:
+        :param flow_id:
+        :param ipaddr:
+        :return:
+        """
+        self.flow['switch'] = "00:00:00:00:00:00:00:%s" % '{0:02x}'.format(node)
+        self.flow['name'] = 'TestFlow-%d' % flow_id
+        self.flow['cookie'] = str(flow_id)
+        self.flow['dst-ip'] = "%s/32" % str(netaddr.IPAddress(ipaddr))
+
+        flow_data = json.dumps(self.flow)
+        # print flow_data
+        # print flow_url
+
+        r = session.post(self.url, data=flow_data, headers=self.putheaders, stream=False)
+        return r.status_code
+
+
+    def delete_flow(self, session, node, flow_id):
+        """
+        Deletes a flow. Overrides the delete_flow method in FlowConfigBlaster.
+        :param session:
+        :param node:
+        :param flow_id:
+        :return:
+        """
+        f = {'name': 'TestFlow-%d' % flow_id}
+        flow_data = json.dumps(f)
+
+        r = session.delete(self.url, data=flow_data, headers=self.getheaders)
+        return r.status_code
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='Flow programming performance test for Floodlight: First adds and '
+                                                 'then deletes flows using the Static Flow Entry Pusher REST API.')
+
+    parser.add_argument('--host', default='127.0.0.1',
+                        help='Host where the controller is running (default is 127.0.0.1)')
+    parser.add_argument('--port', default='8080',
+                        help='Port on which the controller\'s REST API is listening (default is 8080)')
+    parser.add_argument('--cycles', type=int, default=1,
+                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
+                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
+                             'ends when all threads finish. Another cycle starts when the previous cycle has finished.')
+    parser.add_argument('--threads', type=int, default=1,
+                        help='Number of request worker threads to start in each cycle; default=1. '
+                             'Each thread will add/delete <FLOWS> flows.')
+    parser.add_argument('--flows', type=int, default=10,
+                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
+                             'default 10')
+    parser.add_argument('--nodes', type=int, default=16,
+                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
+                             'flows will be evenly distributed (programmed) into connected nodes.')
+    parser.add_argument('--delay', type=int, default=0,
+                        help='Time (in seconds) to wait between the add and delete cycles; default=0')
+    parser.add_argument('--no-delete', dest='delete', action='store_false',
+                        help='Do not perform the delete cycle.')
+    parser.add_argument('--startflow', type=int, default=0,
+                        help='The starting Flow ID; default=0')
+
+    in_args = parser.parse_args()
+
+
+    fct = FlowConfigBlasterFLE(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
+                               in_args.flows, in_args.startflow)
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+    fct.add_blaster()
+
+    print '\n*** Total flows added: %s' % fct.get_total_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+
+    if in_args.delay > 0:
+        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        time.sleep(in_args.delay)
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
+    # deleted in each thread
+    if in_args.delete:
+        fct.delete_blaster()
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
new file mode 100755 (executable)
index 0000000..5cc4fe3
--- /dev/null
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+import json
+import sys
+
+if __name__ == "__main__":
+
+    data = sys.stdin.readlines()
+    payload = json.loads(data.pop(0))
+    s = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
+    print '%s\n\n' % s
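+
+# Example usage (illustrative): pipe a single-line JSON document to stdin, e.g.
+# the output of a RESTCONF query; note that only the first line of input is
+# parsed.
+#
+#   echo '{"b": 2, "a": 1}' | ./pretty_print.py
+#
+#   curl -s -u admin:admin -H "Accept: application/json" http://127.0.0.1:8181/restconf/config/opendaylight-inventory:nodes | ./pretty_print.py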