Reworked FlowConfigBlaster to be able to batch multiple flows in a single RESTCONF...
author    Jan Medved <jmedved@cisco.com>
Sun, 3 May 2015 21:31:05 +0000 (14:31 -0700)
committer Jan Medved <jmedved@cisco.com>
Sun, 3 May 2015 21:32:50 +0000 (14:32 -0700)
Change-Id: I14806d575581565533c65bb678a9930fcd995a40
Signed-off-by: Jan Medved <jmedved@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow-template.json [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/multi-blaster.sh
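
The main change in this commit is batching: flow_config_blaster.py now POSTs a list
of flows to the table URL in a single RESTCONF request (the new --fpr batch size)
instead of PUTting one flow at a time. A minimal sketch of such a batched POST is
shown below; the host, port, credentials and the two example flows are illustrative
assumptions, not values taken from the patch.

    # Sketch of the batched RESTCONF POST introduced by this change (illustrative only).
    import json
    import requests

    TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"

    def make_flow(i):
        # Build one flow dict following the flow-template.json layout
        return {'id': str(i), 'cookie': i, 'flow-name': 'TestFlow-%d' % i,
                'table_id': 0, 'priority': 2, 'hard-timeout': 65000, 'idle-timeout': 65000,
                'match': {'ethernet-match': {'ethernet-type': {'type': 2048}},
                          'ipv4-destination': '10.0.0.%d/32' % i},
                'instructions': {'instruction': [{'order': 0, 'apply-actions': {
                    'action': [{'order': 0, 'drop-action': {}}]}}]}}

    def post_flow_batch(host, port, node, flows):
        # One POST carries the whole 'flow' list, i.e. one request per <fpr> flows
        url = 'http://' + host + ':' + port + '/' + TBLURL % node
        r = requests.post(url, data=json.dumps({'flow': flows}),
                          headers={'content-type': 'application/json'},
                          auth=('admin', 'admin'))
        return r.status_code

    print(post_flow_batch('127.0.0.1', '8181', 1, [make_flow(1), make_flow(2)]))
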

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow-template.json b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow-template.json
new file mode 100644 (file)
index 0000000..25cc393
--- /dev/null
@@ -0,0 +1,40 @@
+{
+    "flow": [
+        {
+            "id": "38",
+            "cookie": 38,
+            "instructions": {
+                "instruction": [
+                    {
+                        "order": 0,
+                        "apply-actions": {
+                            "action": [
+                                {
+                                    "order": 0,
+                                    "drop-action": { }
+                                }
+                            ]
+                        }
+                    }
+                ]
+            },
+            "hard-timeout": 65000,
+            "match": {
+                "ethernet-match": {
+                    "ethernet-type": {
+                        "type": 2048
+                    }
+                },
+                "ipv4-destination": "10.0.0.38/32"
+            },
+            "flow-name": "TestFlow-8",
+            "strict": false,
+            "cookie_mask": 4294967295,
+            "priority": 2,
+            "table_id": 0,
+            "idle-timeout": 65000,
+            "installHw": false
+        }
+
+    ]
+}
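
The template above can also be handed to the blaster with --file; the reworked
get_json_from_file() (later in this commit) only accepts it if the first flow entry
carries 'cookie', 'flow-name', 'id' and a 'match' with 'ipv4-destination'. A small
sketch of that load-and-validate step follows; the file name is an assumption.

    # Sketch mirroring the validation done by get_json_from_file(); file name is assumed.
    import json

    REQUIRED = (u'cookie', u'flow-name', u'id', u'match')

    def load_flow_template(filename):
        with open(filename, 'r') as f:
            template = json.load(f)
        flow = template['flow'][0]
        if all(k in flow for k in REQUIRED) and u'ipv4-destination' in flow['match']:
            return template      # usable as a flow template
        return None              # caller falls back to the built-in template

    print(load_flow_template('flow-template.json'))
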
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
index 26f798aedbe63c1b8f6d0f5142db9f58f3d056c2..8eeca02e98f18725d32c3c9cb799f2011a0f4a40 100755 (executable)
@@ -14,13 +14,14 @@ from config_cleanup import cleanup_config_odl
 
 def wait_for_stats(crawler, exp_found, timeout, delay):
     """
-    Waits for the ODL stats manager to catch up. Polls ODL inventory every <delay> seconds and compares the
-    retrieved stats to the expected values. If stats collection has not finished within <timeout> seconds,
-    Gives up/
-
+    Waits for the ODL stats manager to catch up. Polls ODL inventory every
+    <delay> seconds and compares the retrieved stats to the expected values. If
+    stats collection has not finished within <timeout> seconds, the test is
+    aborted.
     :param crawler: Inventory crawler object
     :param exp_found: Expected value for flows found in the network
-    :param timeout: Max number of seconds to wait for stats collector to collect all stats
+    :param timeout: Max number of seconds to wait for stats collector to
+                    collect all stats
     :param delay: poll interval for inventory
     :return: None
     """
@@ -41,58 +42,20 @@ def wait_for_stats(crawler, exp_found, timeout, delay):
 
 
 if __name__ == "__main__":
-    ########################################################################################
-    # This program executes an ODL performance test. The test is executed in three steps:
-    #
-    # 1. The specified number of flows is added in the 'add cycle' (uses flow_config_blaster to blast flows)
+    ############################################################################
+    # This program executes an ODL performance test. The test is executed in
+    # three steps:
     #
-    # 2. The network is polled for flow statistics from the network (using the inventory_crawler) to make sure
-    #    that all flows have been properly programmed into the network and the ODL statistics collector can
-    #    properly read them
-    #
-    # 3. The flows are deleted in the flow cycle. Deletion happens either in 'bulk' (using the config_cleanup)
-    #    script or one by one (using the flow_config_blaster 'delete' method)
-    ########################################################################################
-
-    JSON_FLOW_MOD1 = '''{
-        "flow-node-inventory:flow": [
-            {
-                "flow-node-inventory:cookie": %d,
-                "flow-node-inventory:cookie_mask": 4294967295,
-                "flow-node-inventory:flow-name": "%s",
-                "flow-node-inventory:hard-timeout": %d,
-                "flow-node-inventory:id": "%s",
-                "flow-node-inventory:idle-timeout": %d,
-                "flow-node-inventory:installHw": false,
-                "flow-node-inventory:instructions": {
-                    "flow-node-inventory:instruction": [
-                        {
-                            "flow-node-inventory:apply-actions": {
-                                "flow-node-inventory:action": [
-                                    {
-                                        "flow-node-inventory:drop-action": {},
-                                        "flow-node-inventory:order": 0
-                                    }
-                                ]
-                            },
-                            "flow-node-inventory:order": 0
-                        }
-                    ]
-                },
-                "flow-node-inventory:match": {
-                    "flow-node-inventory:ipv4-destination": "%s/32",
-                    "flow-node-inventory:ethernet-match": {
-                        "flow-node-inventory:ethernet-type": {
-                            "flow-node-inventory:type": 2048
-                        }
-                    }
-                },
-                "flow-node-inventory:priority": 2,
-                "flow-node-inventory:strict": false,
-                "flow-node-inventory:table_id": 0
-            }
-        ]
-    }'''
+    # 1. The specified number of flows is added in the 'add cycle' (uses
+    #    flow_config_blaster to blast flows)
+    # 2. The network is polled for flow statistics (using the
+    #    inventory_crawler.py script) to make sure that all flows have been
+    #    properly programmed into the network and the ODL statistics collector
+    #    can properly read them
+    # 3. The flows are deleted in the 'delete cycle'. Deletion happens either
+    #    in bulk (using the config_cleanup script) or one by one (using the
+    #    flow_config_blaster 'delete' method)
+    ############################################################################
 
     parser = argparse.ArgumentParser(description='Flow programming performance test: First adds flows into the config '
                                                  'tree and then deletes them, as specified by optional parameters.')
@@ -111,9 +74,9 @@ if __name__ == "__main__":
     parser.add_argument('--flows', type=int, default=10,
                         help='Number of flows that will be added/deleted by each worker thread in each cycle; '
                              'default 10')
-    parser.add_argument('--nodes', type=int, default=16,
-                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
-                             'flows will be evenly distributed (programmed) into connected nodes.')
+    parser.add_argument('--fpr', type=int, default=1,
+                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+                             'default 1')
     parser.add_argument('--delay', type=int, default=2,
                         help='Time (seconds) between inventory polls when waiting for stats to catch up; default=2')
     parser.add_argument('--timeout', type=int, default=100,
@@ -137,15 +100,14 @@ if __name__ == "__main__":
     if in_args.file != '':
         flow_template = get_json_from_file(in_args.file)
     else:
-        flow_template = JSON_FLOW_MOD1
+        flow_template = None
 
     ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
 
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
-                            in_args.flows, in_args.startflow, in_args.auth, flow_template)
-
-    # Get the baseline stats. Required in Step 3 to validate if the delete function gets the controller back to
-    # the baseline
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
+                            16, in_args.flows, in_args.startflow, in_args.auth)
+    # Get the baseline stats. Required in Step 3 to validate if the delete
+    # function gets the controller back to the baseline
     ic.crawl_inventory()
     reported = ic.reported_flows
     found = ic.found_flows
@@ -154,18 +116,19 @@ if __name__ == "__main__":
     print '   Reported flows: %d' % reported
     print '   Found flows:    %d' % found
 
-    # Run through <CYCLES> add cycles, where <THREADS> threads are started in each cycle and <FLOWS> flows are
-    # added from each thread
+    # Run through <CYCLES> add cycles, where <THREADS> threads are started in
+    # each cycle and <FLOWS> flows are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_total_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+    print '\n*** Total flows added: %d' % fct.get_ok_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
 
     # Wait for stats to catch up
     wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
 
-    # Run through <CYCLES> delete cycles, where <THREADS> threads  are started in each cycle and <FLOWS> flows
-    # previously added in an add cycle are deleted in each thread
+    # Run through <CYCLES> delete cycles, where <THREADS> threads are started
+    # in each cycle and <FLOWS> flows previously added in an add cycle are
+    # deleted in each thread
     if in_args.bulk_delete:
         print '\nDeleting all flows in bulk:'
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
@@ -176,6 +139,8 @@ if __name__ == "__main__":
     else:
         print '\nDeleting flows one by one\n   ',
         fct.delete_blaster()
+        print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
+        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
 
     # Wait for stats to catch up back to baseline
     wait_for_stats(ic, found, in_args.timeout, in_args.delay)
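
Taken together, the rewritten driver follows the three steps from the header comment:
blast flows, wait for statistics to converge, then delete (in bulk or one by one).
A condensed sketch of that sequence is shown below, wiring the classes up the same
way the script does; the literal host/port values and the import paths (all scripts
living in the same directory) are assumptions.

    # Condensed sketch of the add / verify / delete sequence in flow_add_delete_test.py.
    from flow_config_blaster import FlowConfigBlaster
    from inventory_crawler import InventoryCrawler
    from config_cleanup import cleanup_config_odl
    from flow_add_delete_test import wait_for_stats

    host, port = '127.0.0.1', '8181'
    ic = InventoryCrawler(host, port, 0, 'operational', False, False)
    fct = FlowConfigBlaster(host, port, 1, 1, 1, 16, 10, 0, False)

    ic.crawl_inventory()
    baseline = ic.found_flows                          # Step 0: baseline flow count

    fct.add_blaster()                                  # Step 1: add flows
    wait_for_stats(ic, baseline + fct.get_ok_flows(), 100, 2)   # Step 2: verify stats

    cleanup_config_odl(host, port, False)              # Step 3: bulk delete
    wait_for_stats(ic, baseline, 100, 2)
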
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
index 52b53eb1e6b93f34a315e385608f8c8b05fd9561..219d33ea56fcdcfde133304e5e3be2bc13b0492a 100755 (executable)
@@ -10,6 +10,7 @@ import argparse
 import time
 import threading
 import re
+import copy
 
 import requests
 import netaddr
@@ -51,27 +52,138 @@ class FlowConfigBlaster(object):
     getheaders = {'Accept': 'application/json'}
 
     FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"
     INVURL = 'restconf/operational/opendaylight-inventory:nodes'
 
-    ok_total = 0
-
     flows = {}
 
-    def __init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow, auth, json_template):
+    # The "built-in" flow template
+    flow_mode_template = {
+        u'flow': [
+            {
+                u'hard-timeout': 65000,
+                u'idle-timeout': 65000,
+                u'cookie_mask': 4294967295,
+                u'flow-name': u'FLOW-NAME-TEMPLATE',
+                u'priority': 2,
+                u'strict': False,
+                u'cookie': 0,
+                u'table_id': 0,
+                u'installHw': False,
+                u'id': u'FLOW-ID-TEMPLATE',
+                u'match': {
+                    u'ipv4-destination': u'0.0.0.0/32',
+                    u'ethernet-match': {
+                        u'ethernet-type': {
+                            u'type': 2048
+                        }
+                    }
+                },
+                u'instructions': {
+                    u'instruction': [
+                        {
+                            u'order': 0,
+                            u'apply-actions': {
+                                u'action': [
+                                    {
+                                        u'drop-action': {},
+                                        u'order': 0
+                                    }
+                                ]
+                            }
+                        }
+                    ]
+                }
+            }
+        ]
+    }
+
+    class FcbStats(object):
+        """
+        FlowConfigBlaster Statistics: a class that stores and further processes
+        statistics collected by Blaster worker threads during their execution.
+        """
+        def __init__(self):
+            self.ok_rqst_rate = Counter(0.0)
+            self.total_rqst_rate = Counter(0.0)
+            self.ok_flow_rate = Counter(0.0)
+            self.total_flow_rate = Counter(0.0)
+            self.ok_rqsts = Counter(0)
+            self.total_rqsts = Counter(0)
+            self.ok_flows = Counter(0)
+            self.total_flows = Counter(0)
+
+        def process_stats(self, rqst_stats, flow_stats, elapsed_time):
+            """
+            Calculates the stats for RESTCONF request and flow programming
+            throughput, and aggregates statistics across all Blaster threads.
+            """
+            ok_rqsts = rqst_stats[200] + rqst_stats[204]
+            total_rqsts = sum(rqst_stats.values())
+            ok_flows = flow_stats[200] + flow_stats[204]
+            total_flows = sum(flow_stats.values())
+
+            ok_rqst_rate = ok_rqsts / elapsed_time
+            total_rqst_rate = total_rqsts / elapsed_time
+            ok_flow_rate = ok_flows / elapsed_time
+            total_flow_rate = total_flows / elapsed_time
+
+            self.ok_rqsts.increment(ok_rqsts)
+            self.total_rqsts.increment(total_rqsts)
+            self.ok_flows.increment(ok_flows)
+            self.total_flows.increment(total_flows)
+
+            self.ok_rqst_rate.increment(ok_rqst_rate)
+            self.total_rqst_rate.increment(total_rqst_rate)
+            self.ok_flow_rate.increment(ok_flow_rate)
+            self.total_flow_rate.increment(total_flow_rate)
+
+            return ok_rqst_rate, total_rqst_rate, ok_flow_rate, total_flow_rate
+
+        def get_ok_rqst_rate(self):
+            return self.ok_rqst_rate.value
+
+        def get_total_rqst_rate(self):
+            return self.total_rqst_rate.value
+
+        def get_ok_flow_rate(self):
+            return self.ok_flow_rate.value
+
+        def get_total_flow_rate(self):
+            return self.total_flow_rate.value
+
+        def get_ok_rqsts(self):
+            return self.ok_rqsts.value
+
+        def get_total_rqsts(self):
+            return self.total_rqsts.value
+
+        def get_ok_flows(self):
+            return self.ok_flows.value
+
+        def get_total_flows(self):
+            return self.total_flows.value
+
+    def __init__(self, host, port, ncycles, nthreads, fpr, nnodes, nflows, startflow, auth, flow_mod_template=None):
         self.host = host
         self.port = port
         self.ncycles = ncycles
         self.nthreads = nthreads
+        self.fpr = fpr
         self.nnodes = nnodes
         self.nflows = nflows
         self.startflow = startflow
         self.auth = auth
 
-        self.json_template = json_template
-        self.url_template = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
+        if flow_mod_template:
+            self.flow_mode_template = flow_mod_template
+
+        self.post_url_template = 'http://' + self.host + ":" + self.port + '/' + self.TBLURL
+        self.del_url_template = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
 
-        self.ok_rate = Counter(0.0)
-        self.total_rate = Counter(0.0)
+        self.stats = self.FcbStats()
+        self.total_ok_flows = 0
+        self.total_ok_rqsts = 0
 
         self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
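
The FcbStats helper above simply turns the per-thread status-code histograms into
rates and running totals. A worked example of the process_stats() arithmetic with
made-up numbers (10 successful POSTs of 5 flows each, finished in 2 seconds):

    # Worked example of the process_stats() arithmetic; inputs are made up.
    rqst_stats = {200: 10, 204: 0}       # 10 successful requests
    flow_stats = {200: 50, 204: 0}       # 5 flows per request -> 50 flows
    elapsed_time = 2.0                   # seconds

    ok_rqsts = rqst_stats[200] + rqst_stats[204]   # 10
    ok_flows = flow_stats[200] + flow_stats[204]   # 50

    print('Requests/s: %.2f' % (ok_rqsts / elapsed_time))   # 5.00
    print('Flows/s:    %.2f' % (ok_flows / elapsed_time))   # 25.00
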
 
@@ -84,8 +196,11 @@ class FlowConfigBlaster(object):
 
     def get_num_nodes(self, session):
         """
-        Determines the number of OF nodes in the connected mininet network. If mininet is not connected, the default
-        number of flows is 16
+        Determines the number of OF nodes in the connected mininet network. If
+        mininet is not connected, the default number of nodes (16) is used.
+        :param session: 'requests' session to use to query the controller for
+                        openflow nodes
+        :return: The number of nodes into which flows will be programmed
         """
         inventory_url = 'http://' + self.host + ":" + self.port + '/' + self.INVURL
         nodes = self.nnodes
@@ -109,29 +224,60 @@ class FlowConfigBlaster(object):
 
         return nodes
 
-    def add_flow(self, session, node, flow_id, ipaddr):
+    def create_flow_from_template(self, flow_id, ipaddr):
+        """
+        Create a new flow instance from the flow template specified during
+        FlowConfigBlaster instantiation. Flow templates are json-compatible
+        dictionaries that MUST contain elements for flow cookie, flow name,
+        flow id and the destination IPv4 address in the flow match field.
+        :param flow_id: Id for the new flow to create
+        :param ipaddr: IP Address to put into the flow's match
+        :return: The newly created flow instance
         """
-        Adds a single flow to the config data store via REST
+        flow = copy.deepcopy(self.flow_mode_template['flow'][0])
+        flow['cookie'] = flow_id
+        flow['flow-name'] = 'TestFlow-%d' % flow_id
+        flow['id'] = str(flow_id)
+        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
+        return flow
+
+    def post_flows(self, session, node, flow_list):
         """
-        flow_data = self.json_template % (flow_id, 'TestFlow-%d' % flow_id, 65000, str(flow_id), 65000,
-                                          str(netaddr.IPAddress(ipaddr)))
+        Performs a RESTCONF POST of the flows passed in the 'flow_list' parameter
+        :param session: 'requests' session on which to perform the POST
+        :param node: The ID of the openflow node to which to post the flows
+        :param flow_list: List of flows (in dictionary form) to POST
+        :return: status code from the POST operation
+        """
+        fmod = dict(self.flow_mode_template)
+        fmod['flow'] = flow_list
+        flow_data = json.dumps(fmod)
         # print flow_data
-        flow_url = self.url_template % (node, flow_id)
+        flow_url = self.post_url_template % node
         # print flow_url
 
         if not self.auth:
-            r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False)
+            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False)
         else:
-            r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'))
+            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'))
 
         return r.status_code
 
-    def add_flows(self, start_flow, tid):
+    def add_flows(self, start_flow_id, tid):
         """
-        Adds flows into the ODL config space. This function is executed by a worker thread
+        Adds flows into the ODL config data store. This function is executed by
+        a worker thread (the Blaster thread). The number of flows created and
+        the batch size (i.e. how many flows will be put into a RESTCONF request)
+        are determined by control parameters initialized when FlowConfigBlaster
+        is created.
+        :param start_flow_id: The ID of the first flow. Each Blaster thread
+                              programs a different set of flows
+        :param tid: Thread ID - used to identify the Blaster thread when
+                    statistics for the thread are printed out
+        :return: None
         """
-
-        add_res = {200: 0}
+        rqst_stats = {200: 0, 204: 0}
+        flow_stats = {200: 0, 204: 0}
 
         s = requests.Session()
 
@@ -140,31 +286,36 @@ class FlowConfigBlaster(object):
         with self.print_lock:
             print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
 
+        nflows = 0
         with Timer() as t:
-            for flow in range(self.nflows):
+            while nflows < self.nflows:
                 node_id = randrange(1, n_nodes + 1)
-                flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
-                self.flows[tid][flow_id] = node_id
-                sts = self.add_flow(s, node_id, flow_id, self.ip_addr.increment())
+                flow_list = []
+                for i in range(self.fpr):
+                    flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
+                    self.flows[tid][flow_id] = node_id
+                    flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment()))
+                    nflows += 1
+                    if nflows >= self.nflows:
+                        break
+                sts = self.post_flows(s, node_id, flow_list)
                 try:
-                    add_res[sts] += 1
+                    rqst_stats[sts] += 1
+                    flow_stats[sts] += len(flow_list)
                 except KeyError:
-                    add_res[sts] = 1
-
-        add_time = t.secs
-        add_ok_rate = add_res[200] / add_time
-        add_total_rate = sum(add_res.values()) / add_time
+                    rqst_stats[sts] = 1
+                    flow_stats[sts] = len(flow_list)
 
-        self.ok_rate.increment(add_ok_rate)
-        self.total_rate.increment(add_total_rate)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
 
         with self.print_lock:
-            print '    Thread %d: ' % tid
-            print '        Add time: %.2f,' % add_time
-            print '        Add success rate:  %.2f, Add total rate: %.2f' % (add_ok_rate, add_total_rate)
-            print '        Add Results: ',
-            print add_res
-            self.ok_total += add_res[200]
+            print '\n    Thread %d results (ADD): ' % tid
+            print '        Elapsed time: %.2fs,' % t.secs
+            print '        Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
+            print '        Flows/s:    %.2f OK, %.2f Total' % (ok_fps, total_fps)
+            print '        Stats ({Requests}, {Flows}): ',
+            print rqst_stats,
+            print flow_stats
             self.threads_done += 1
 
         s.close()
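
The flow IDs generated in the loop above keep every Blaster thread (and every cycle)
in its own ID range, so threads never overwrite each other's flows. A worked example
of the ID arithmetic follows; the per-cycle start offset (cycle * nflows) is an
assumption about run_cycle(), whose thread-start code is outside the hunks shown here.

    # Worked example of the flow-ID layout used by add_flows(); parameters are made up.
    ncycles, nflows, startflow = 2, 5, 0

    for tid in (0, 1):                        # two Blaster threads
        for cycle in range(ncycles):
            start_flow_id = cycle * nflows    # assumed per-cycle offset
            ids = [tid * (ncycles * nflows) + n + start_flow_id + startflow
                   for n in range(nflows)]
            print('thread %d, cycle %d: flow IDs %s' % (tid, cycle, ids))
    # thread 0, cycle 0: flow IDs [0, 1, 2, 3, 4]
    # thread 0, cycle 1: flow IDs [5, 6, 7, 8, 9]
    # thread 1, cycle 0: flow IDs [10, 11, 12, 13, 14]
    # thread 1, cycle 1: flow IDs [15, 16, 17, 18, 19]
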
@@ -174,9 +325,13 @@ class FlowConfigBlaster(object):
 
     def delete_flow(self, session, node, flow_id):
         """
-        Deletes a single flow from the ODL config data store via REST
+        Deletes a single flow from the ODL config data store using RESTCONF
+        :param session: 'requests' session on which to perform the DELETE
+        :param node: Id of the openflow node from which to delete the flow
+        :param flow_id: ID of the to-be-deleted flow
+        :return: status code from the DELETE operation
         """
-        flow_url = self.url_template % (node, flow_id)
+        flow_url = self.del_url_template % (node, flow_id)
 
         if not self.auth:
             r = session.delete(flow_url, headers=self.getheaders)
@@ -187,10 +342,17 @@ class FlowConfigBlaster(object):
 
     def delete_flows(self, start_flow, tid):
         """
-        Deletes flow from the ODL config space that have been added using the 'add_flows()' function. This function is
-        executed by a worker thread
+        Deletes flows from the ODL config data store that have been added using
+        the 'add_flows()' function. This function is executed by a worker thread
+        :param start_flow: The ID of the first flow. Each Blaster thread
+                           deletes a different set of flows
+        :param tid: Thread ID - used to identify the Blaster thread when
+                    statistics for the thread are printed out
+        :return: None
         """
-        del_res = {200: 0}
+        """
+        """
+        rqst_stats = {200: 0, 204: 0}
 
         s = requests.Session()
         n_nodes = self.get_num_nodes(s)
@@ -203,24 +365,19 @@ class FlowConfigBlaster(object):
                 flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
                 sts = self.delete_flow(s, self.flows[tid][flow_id], flow_id)
                 try:
-                    del_res[sts] += 1
+                    rqst_stats[sts] += 1
                 except KeyError:
-                    del_res[sts] = 1
-
-        del_time = t.secs
+                    rqst_stats[sts] = 1
 
-        del_ok_rate = del_res[200] / del_time
-        del_total_rate = sum(del_res.values()) / del_time
-
-        self.ok_rate.increment(del_ok_rate)
-        self.total_rate.increment(del_total_rate)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
 
         with self.print_lock:
-            print '    Thread %d: ' % tid
-            print '        Delete time: %.2f,' % del_time
-            print '        Delete success rate:  %.2f, Delete total rate: %.2f' % (del_ok_rate, del_total_rate)
-            print '        Delete Results: ',
-            print del_res
+            print '\n    Thread %d results (DELETE): ' % tid
+            print '        Elapsed time: %.2fs,' % t.secs
+            print '        Requests/s:  %.2f OK,  %.2f Total' % (ok_rps, total_rps)
+            print '        Flows/s:     %.2f OK,  %.2f Total' % (ok_fps, total_fps)
+            print '        Stats ({Requests})',
+            print rqst_stats
             self.threads_done += 1
 
         s.close()
@@ -230,11 +387,18 @@ class FlowConfigBlaster(object):
 
     def run_cycle(self, function):
         """
-        Runs an add or delete cycle. Starts a number of worker threads that each add a bunch of flows. Work is done
-        in context of the worker threads.
+        Runs a flow-add or flow-delete test cycle. Each test consists of
+        <cycles> test cycles, where <threads> worker (Blaster) threads are
+        started in each test cycle. Each Blaster thread programs <flows>
+        OpenFlow flows into the controller using the controller's RESTCONF API.
+        :param function: Add or delete, determines what test will be executed.
+        :return: None
         """
+        self.total_ok_flows = 0
+        self.total_ok_rqsts = 0
 
         for c in range(self.ncycles):
+            self.stats = self.FcbStats()
             with self.print_lock:
                 print '\nCycle %d:' % c
 
@@ -251,28 +415,36 @@ class FlowConfigBlaster(object):
                         self.cond.wait()
 
             with self.print_lock:
-                print '    Total success rate: %.2f, Total rate: %.2f' % (
-                    self.ok_rate.value, self.total_rate.value)
-                measured_rate = (self.nthreads * self.nflows) / t.secs
-                print '    Measured rate:      %.2f (%.2f%% of Total success rate)' % \
-                      (measured_rate, measured_rate / self.total_rate.value * 100)
-                print '    Measured time:      %.2fs' % t.secs
+                print '\n*** Test summary:'
+                print '    Elapsed time:    %.2fs' % t.secs
+                print '    Peak requests/s: %.2f OK, %.2f Total' % (
+                    self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
+                print '    Peak flows/s:    %.2f OK, %.2f Total' % (
+                    self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
+                print '    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+                    self.stats.get_ok_rqsts() / t.secs,
+                    self.stats.get_total_rqsts() / t.secs,
+                    (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
+                print '    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+                    self.stats.get_ok_flows() / t.secs,
+                    self.stats.get_total_flows() / t.secs,
+                    (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+
+                self.total_ok_flows += self.stats.get_ok_flows()
+                self.total_ok_rqsts += self.stats.get_ok_rqsts()
                 self.threads_done = 0
 
-            self.ok_rate.value = 0
-            self.total_rate.value = 0
-
     def add_blaster(self):
         self.run_cycle(self.add_flows)
 
     def delete_blaster(self):
         self.run_cycle(self.delete_flows)
 
-    def get_total_flows(self):
-        return sum(len(self.flows[key]) for key in self.flows.keys())
-
     def get_ok_flows(self):
-        return self.ok_total
+        return self.total_ok_flows
+
+    def get_ok_rqsts(self):
+        return self.total_ok_rqsts
 
 
 def get_json_from_file(filename):
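
The per-cycle summary printed by run_cycle() distinguishes peak rates (the per-thread
rates summed up by FcbStats) from average rates (totals divided by the cycle's
wall-clock time). A small numeric illustration of the difference, with made-up figures
for two threads:

    # Peak vs. average rate arithmetic from the run_cycle() summary; numbers are made up.
    # Two threads, each adding 1000 flows; one finishes in 2s, the other in 4s,
    # and the whole cycle takes 4s of wall-clock time.
    peak_flow_rate = 1000 / 2.0 + 1000 / 4.0   # per-thread rates summed -> 750.00
    avg_flow_rate = (1000 + 1000) / 4.0        # total flows / elapsed   -> 500.00
    print('Peak flows/s: %.2f' % peak_flow_rate)
    print('Avg. flows/s: %.2f (%.2f%% of peak)'
          % (avg_flow_rate, avg_flow_rate / peak_flow_rate * 100))   # 66.67% of peak
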
@@ -282,51 +454,74 @@ def get_json_from_file(filename):
     :return: The json flow template (string)
     """
     with open(filename, 'r') as f:
-        read_data = f.read()
-    return read_data
-
-
-if __name__ == "__main__":
-    JSON_FLOW_MOD1 = '''{
-        "flow-node-inventory:flow": [
-            {
-                "flow-node-inventory:cookie": %d,
-                "flow-node-inventory:cookie_mask": 4294967295,
-                "flow-node-inventory:flow-name": "%s",
-                "flow-node-inventory:hard-timeout": %d,
-                "flow-node-inventory:id": "%s",
-                "flow-node-inventory:idle-timeout": %d,
-                "flow-node-inventory:installHw": false,
-                "flow-node-inventory:instructions": {
-                    "flow-node-inventory:instruction": [
-                        {
-                            "flow-node-inventory:apply-actions": {
-                                "flow-node-inventory:action": [
-                                    {
-                                        "flow-node-inventory:drop-action": {},
-                                        "flow-node-inventory:order": 0
-                                    }
-                                ]
-                            },
-                            "flow-node-inventory:order": 0
-                        }
-                    ]
-                },
-                "flow-node-inventory:match": {
-                    "flow-node-inventory:ipv4-destination": "%s/32",
-                    "flow-node-inventory:ethernet-match": {
-                        "flow-node-inventory:ethernet-type": {
-                            "flow-node-inventory:type": 2048
+        try:
+            ft = json.load(f)
+            keys = ft['flow'][0].keys()
+            if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
+                if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
+                    print 'File "%s" ok to use as flow template' % filename
+                    return ft
+        except ValueError:
+            print 'JSON parsing of file %s failed' % filename
+            pass
+
+    return None
+
+###############################################################################
+# This is an example of what the content of a JSON flow mod template should
+# look like. Cut & paste to create a custom template. "id" and "ipv4-destination"
+# MUST be unique if multiple flows will be programmed in the same test. It's
+# also beneficial to have unique "cookie" and "flow-name" attributes for easier
+# identification of the flow.
+###############################################################################
+example_flow_mod_json = '''{
+    "flow": [
+        {
+            "id": "38",
+            "cookie": 38,
+            "instructions": {
+                "instruction": [
+                    {
+                        "order": 0,
+                        "apply-actions": {
+                            "action": [
+                                {
+                                    "order": 0,
+                                    "drop-action": { }
+                                }
+                            ]
                         }
                     }
+                ]
+            },
+            "hard-timeout": 65000,
+            "match": {
+                "ethernet-match": {
+                    "ethernet-type": {
+                        "type": 2048
+                    }
                 },
-                "flow-node-inventory:priority": 2,
-                "flow-node-inventory:strict": false,
-                "flow-node-inventory:table_id": 0
-            }
-        ]
-    }'''
+                "ipv4-destination": "10.0.0.38/32"
+            },
+            "flow-name": "TestFlow-8",
+            "strict": false,
+            "cookie_mask": 4294967295,
+            "priority": 2,
+            "table_id": 0,
+            "idle-timeout": 65000,
+            "installHw": false
+        }
+
+    ]
+}'''
 
+if __name__ == "__main__":
+    ############################################################################
+    # This program executes the base performance test. The test adds flows into
+    # the controller's config data store. It is basically the CLI frontend to
+    # the FlowConfigBlaster class and drives its main functions: adding and
+    # deleting flows from the controller's config data store
+    ############################################################################
     parser = argparse.ArgumentParser(description='Flow programming performance test: First adds flows into the config '
                                                  'tree and then deletes them, as specified by optional parameters.')
 
@@ -344,6 +539,9 @@ if __name__ == "__main__":
     parser.add_argument('--flows', type=int, default=10,
                         help='Number of flows that will be added/deleted by each worker thread in each cycle; '
                              'default 10')
+    parser.add_argument('--fpr', type=int, default=1,
+                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
+                             'default 1')
     parser.add_argument('--nodes', type=int, default=16,
                         help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
                              'flows will be evenly distributed (programmed) into connected nodes.')
@@ -368,22 +566,25 @@ if __name__ == "__main__":
     if in_args.file != '':
         flow_template = get_json_from_file(in_args.file)
     else:
-        flow_template = JSON_FLOW_MOD1
+        flow_template = None
 
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
-                            in_args.flows, in_args.startflow, in_args.auth, flow_template)
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr, in_args.nodes,
+                            in_args.flows, in_args.startflow, in_args.auth)
 
-    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+    # Run through <cycles>, where <threads> are started in each cycle and
+    # <flows> are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_total_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+    print '\n*** Total flows added: %s' % fct.get_ok_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
 
     if in_args.delay > 0:
         print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
         time.sleep(in_args.delay)
 
-    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
-    # deleted in each thread
+    # Run through <cycles>, where <threads> are started in each cycle and
+    # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
+        print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
+        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py
index 11204548742a1a338ecb383c0785da76523ed58f..ac531254f0dcc63ef3abdea4fcccf1a73276f79c 100755 (executable)
@@ -51,7 +51,7 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
 
         return nodes
 
-    def add_flow(self, session, node, flow_id, ipaddr):
+    def post_flows(self, session, node, flow_id, ipaddr):
         """
-        Adds a flow. Overrides the add_flow method in FlowConfigBlaster.
+        Adds a flow. Overrides the post_flows method in FlowConfigBlaster.
         :param session:
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/multi-blaster.sh b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/multi-blaster.sh
index 154c1501e50b1bfa0a559a7cbc64ee04da86dee7..854643fa21c6aa8a64fcaa67b374b7a946a2208c 100755 (executable)
@@ -20,19 +20,21 @@ auth=false
 threads=1
 flows=1000
 cycles=1
-odl_host=127.0.0.1
-odl_port=8181
+host=127.0.0.1
+port=8181
+fpr=1
 
 function usage {
     echo "usage: $program_name [-h?an] [-i instances] [-c cycles] [-f flows] [-t threads] [-o odl_host] [-p odl_port]"
     echo "     -h|?          print this message"
     echo "     -a            use default authentication ('admin/admin')"
+    echo "     -b batchsize  # offlows per RESTCONF add-flow request"
     echo "     -n            use the 'no-delete' flag in '$CMD'"
     echo "     -i instances  number of '$CMD' instances to spawn"
     echo "     -c cycles     number of cycles"
     echo "     -f flows      number of flows"
-    echo "     -o odl_host   IP Address of the ODL controller"
-    echo "     -p odl_port   RESTCONF port in the ODL controller"
+    echo "     -h host       IP Address of the ODL controller"
+    echo "     -p port       RESTCONF port in the ODL controller"
     echo "     -t threads    number of threads"
     echo "Optional flags/arguments [acfnopt] are passed to '$CMD'."
 }
@@ -40,7 +42,7 @@ function usage {
 # Initialize our own variables:
 
 
-while getopts "h?ac:f:i:no:p:t:" opt; do
+while getopts "h?ab:c:f:i:no:p:t:" opt; do
     case "$opt" in
     h|\?)
         usage
@@ -48,6 +50,8 @@ while getopts "h?ac:f:i:no:p:t:" opt; do
         ;;
     a)  auth=true
         ;;
+    b)  fpr=$OPTARG
+        ;;
     c)  cycles=$OPTARG
         ;;
     f)  flows=$OPTARG
@@ -56,9 +60,9 @@ while getopts "h?ac:f:i:no:p:t:" opt; do
         ;;
     n)  no_delete=true
         ;;
-    o)  odl_host=$OPTARG
+    o)  host=$OPTARG
         ;;
-    p)  odl_port=$OPTARG
+    p)  port=$OPTARG
         ;;
     t)  threads=$OPTARG
         ;;
@@ -76,7 +80,7 @@ while [  $i -lt $instances ]; do
     let "startflow=$flows_per_instance * $i"
 
     CMD_STRING=$(printf '%s --cycles %s --flows %s --threads %s ' $CMD $cycles $flows $threads)
-    CMD_STRING+=$(printf ' --host %s --port %s --startflow %s' $odl_host $odl_port $startflow)
+    CMD_STRING+=$(printf ' --host %s --port %s --startflow %s --fpr %s' $host $port $startflow $fpr)
     if [ "$auth" = true ] ; then
         CMD_STRING+=' --auth'
     fi
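
Each spawned flow_config_blaster.py instance receives its own --startflow offset, so
the instances never collide on flow IDs. The same arithmetic as the shell loop above,
sketched in Python with made-up numbers (3 instances, 1000 flows per instance; this
assumes flows_per_instance matches the --flows value passed to each instance):

    # Same start-flow arithmetic as the multi-blaster.sh loop; values are made up.
    instances, flows_per_instance = 3, 1000
    for i in range(instances):
        startflow = flows_per_instance * i
        print('instance %d: --flows %d --startflow %d --fpr 1'
              % (i, flows_per_instance, startflow))
    # instance 0: --flows 1000 --startflow 0 --fpr 1
    # instance 1: --flows 1000 --startflow 1000 --fpr 1
    # instance 2: --flows 1000 --startflow 2000 --fpr 1
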