Migrate Get Requests invocations (libraries)
[integration/test.git] / tools / odl-mdsal-clustering-tests / clustering-performance-test / flow_config_blaster.py
index 8b38b14b8fc89e81e599a2fe942e253a651be8b1..09a25dabd915baab654ed1e717e3705ac514c2ef 100755 (executable)
@@ -46,57 +46,50 @@ class Timer(object):
         self.secs = self.end - self.start
         self.msecs = self.secs * 1000  # millisecs
         if self.verbose:
-            print ("elapsed time: %f ms" % self.msecs)
+            print("elapsed time: %f ms" % self.msecs)
 
 
 class FlowConfigBlaster(object):
-    putheaders = {'content-type': 'application/json'}
-    getheaders = {'Accept': 'application/json'}
+    putheaders = {"content-type": "application/json"}
+    getheaders = {"Accept": "application/json"}
 
-    FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    FLWURL = (
+        "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    )
     TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"
-    INVURL = 'restconf/operational/opendaylight-inventory:nodes'
+    INVURL = "restconf/operational/opendaylight-inventory:nodes"
     TIMEOUT = 10
 
     flows = {}
 
     # The "built-in" flow template
     flow_mode_template = {
-        u'flow': [
+        "flow": [
             {
-                u'hard-timeout': 65000,
-                u'idle-timeout': 65000,
-                u'cookie_mask': 4294967295,
-                u'flow-name': u'FLOW-NAME-TEMPLATE',
-                u'priority': 2,
-                u'strict': False,
-                u'cookie': 0,
-                u'table_id': 0,
-                u'installHw': False,
-                u'id': u'FLOW-ID-TEMPLATE',
-                u'match': {
-                    u'ipv4-destination': u'0.0.0.0/32',
-                    u'ethernet-match': {
-                        u'ethernet-type': {
-                            u'type': 2048
-                        }
-                    }
+                "hard-timeout": 65000,
+                "idle-timeout": 65000,
+                "cookie_mask": 4294967295,
+                "flow-name": "FLOW-NAME-TEMPLATE",
+                "priority": 2,
+                "strict": False,
+                "cookie": 0,
+                "table_id": 0,
+                "installHw": False,
+                "id": "FLOW-ID-TEMPLATE",
+                "match": {
+                    "ipv4-destination": "0.0.0.0/32",
+                    "ethernet-match": {"ethernet-type": {"type": 2048}},
                 },
-                u'instructions': {
-                    u'instruction': [
+                "instructions": {
+                    "instruction": [
                         {
-                            u'order': 0,
-                            u'apply-actions': {
-                                u'action': [
-                                    {
-                                        u'drop-action': {},
-                                        u'order': 0
-                                    }
-                                ]
-                            }
+                            "order": 0,
+                            "apply-actions": {
+                                "action": [{"drop-action": {}, "order": 0}]
+                            },
                         }
                     ]
-                }
+                },
             }
         ]
     }
@@ -106,6 +99,7 @@ class FlowConfigBlaster(object):
         FlowConfigBlaster Statistics: a class that stores and further processes
         statistics collected by Blaster worker threads during their execution.
         """
+
         def __init__(self):
             self.ok_rqst_rate = Counter(0.0)
             self.total_rqst_rate = Counter(0.0)
@@ -176,7 +170,19 @@ class FlowConfigBlaster(object):
         def get_total_flows(self):
             return self.total_flows.value
 
-    def __init__(self, host, port, ncycles, nthreads, fpr, nnodes, nflows, startflow, auth, flow_mod_template=None):
+    def __init__(
+        self,
+        host,
+        port,
+        ncycles,
+        nthreads,
+        fpr,
+        nnodes,
+        nflows,
+        startflow,
+        auth,
+        flow_mod_template=None,
+    ):
         self.host = host
         self.port = port
         self.ncycles = ncycles
@@ -190,14 +196,14 @@ class FlowConfigBlaster(object):
         if flow_mod_template:
             self.flow_mode_template = flow_mod_template
 
-        self.post_url_template = 'http://%s:' + self.port + '/' + self.TBLURL
-        self.del_url_template = 'http://%s:' + self.port + '/' + self.FLWURL
+        self.post_url_template = "http://%s:" + self.port + "/" + self.TBLURL
+        self.del_url_template = "http://%s:" + self.port + "/" + self.FLWURL
 
         self.stats = self.FcbStats()
         self.total_ok_flows = 0
         self.total_ok_rqsts = 0
 
-        self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
+        self.ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")) + startflow)
 
         self.print_lock = threading.Lock()
         self.cond = threading.Condition()
@@ -216,21 +222,31 @@ class FlowConfigBlaster(object):
         """
         hosts = self.host.split(",")
         host = hosts[0]
-        inventory_url = 'http://' + host + ":" + self.port + '/' + self.INVURL
+        inventory_url = "http://" + host + ":" + self.port + "/" + self.INVURL
         nodes = self.nnodes
 
         if not self.auth:
-            r = session.get(inventory_url, headers=self.getheaders, stream=False, timeout=self.TIMEOUT)
+            r = session.get(
+                inventory_url,
+                headers=self.getheaders,
+                stream=False,
+                timeout=self.TIMEOUT,
+            )
         else:
-            r = session.get(inventory_url, headers=self.getheaders, stream=False, auth=('admin', 'admin'),
-                            timeout=self.TIMEOUT)
+            r = session.get(
+                inventory_url,
+                headers=self.getheaders,
+                stream=False,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         if r.status_code == 200:
             try:
-                inv = json.loads(r.content)['nodes']['node']
+                inv = json.loads(r.content)["nodes"]["node"]
                 nn = 0
                 for n in range(len(inv)):
-                    if re.search('openflow', inv[n]['id']) is not None:
+                    if re.search("openflow", inv[n]["id"]) is not None:
                         nn += 1
                 if nn != 0:
                     nodes = nn
@@ -254,11 +270,11 @@ class FlowConfigBlaster(object):
         Returns: The flow that gas been created from the template
 
         """
-        flow = copy.deepcopy(self.flow_mode_template['flow'][0])
-        flow['cookie'] = flow_id
-        flow['flow-name'] = self.create_flow_name(flow_id)
-        flow['id'] = str(flow_id)
-        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
+        flow = copy.deepcopy(self.flow_mode_template["flow"][0])
+        flow["cookie"] = flow_id
+        flow["flow-name"] = self.create_flow_name(flow_id)
+        flow["id"] = str(flow_id)
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ipaddr))
         return flow
 
     def post_flows(self, session, node, flow_list, flow_count):
@@ -276,13 +292,24 @@ class FlowConfigBlaster(object):
         hosts = self.host.split(",")
         host = hosts[flow_count % len(hosts)]
         flow_url = self.assemble_post_url(host, node)
-        # print flow_url
 
         if not self.auth:
-            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
+            r = session.post(
+                flow_url,
+                data=flow_data,
+                headers=self.putheaders,
+                stream=False,
+                timeout=self.TIMEOUT,
+            )
         else:
-            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'),
-                             timeout=self.TIMEOUT)
+            r = session.post(
+                flow_url,
+                data=flow_data,
+                headers=self.putheaders,
+                stream=False,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         return r.status_code
 
@@ -303,9 +330,8 @@ class FlowConfigBlaster(object):
         :return: string containing plain json
         """
         fmod = dict(self.flow_mode_template)
-        fmod['flow'] = flow_list
+        fmod["flow"] = flow_list
         flow_data = json.dumps(fmod)
-        # print flow_data
         return flow_data
 
     def add_flows(self, start_flow_id, tid):
@@ -329,38 +355,56 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+            print(
+                "    Thread %d:\n        Adding %d flows on %d nodes"
+                % (tid, self.nflows, n_nodes)
+            )
 
         nflows = 0
+        nb_actions = []
+        while nflows < self.nflows:
+            node_id = randrange(1, n_nodes + 1)
+            flow_list = []
+            for i in range(self.fpr):
+                flow_id = (
+                    tid * (self.ncycles * self.nflows)
+                    + nflows
+                    + start_flow_id
+                    + self.startflow
+                )
+                self.flows[tid][flow_id] = node_id
+                flow_list.append(
+                    self.create_flow_from_template(
+                        flow_id, self.ip_addr.increment(), node_id
+                    )
+                )
+                nflows += 1
+                if nflows >= self.nflows:
+                    break
+            nb_actions.append((s, node_id, flow_list, nflows))
+
         with Timer() as t:
-            while nflows < self.nflows:
-                node_id = randrange(1, n_nodes + 1)
-                flow_list = []
-                for i in range(self.fpr):
-                    flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
-                    self.flows[tid][flow_id] = node_id
-                    flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment(), node_id))
-                    nflows += 1
-                    if nflows >= self.nflows:
-                        break
-                sts = self.post_flows(s, node_id, flow_list, nflows)
+            for nb_action in nb_actions:
+                sts = self.post_flows(*nb_action)
                 try:
                     rqst_stats[sts] += 1
-                    flow_stats[sts] += len(flow_list)
+                    flow_stats[sts] += len(nb_action[2])
                 except KeyError:
                     rqst_stats[sts] = 1
-                    flow_stats[sts] = len(flow_list)
+                    flow_stats[sts] = len(nb_action[2])
 
-        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+            rqst_stats, flow_stats, t.secs
+        )
 
         with self.print_lock:
-            print '\n    Thread %d results (ADD): ' % tid
-            print '        Elapsed time: %.2fs,' % t.secs
-            print '        Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
-            print '        Flows/s:    %.2f OK, %.2f Total' % (ok_fps, total_fps)
-            print '        Stats ({Requests}, {Flows}): ',
-            print rqst_stats,
-            print flow_stats
+            print("\n    Thread %d results (ADD): " % tid)
+            print("        Elapsed time: %.2fs," % t.secs)
+            print("        Requests/s: %.2f OK, %.2f Total" % (ok_rps, total_rps))
+            print("        Flows/s:    %.2f OK, %.2f Total" % (ok_fps, total_fps))
+            print("        Stats ({Requests}, {Flows}): ")
+            print(rqst_stats)
+            print(flow_stats)
             self.threads_done += 1
 
         s.close()
@@ -384,12 +428,16 @@ class FlowConfigBlaster(object):
         hosts = self.host.split(",")
         host = hosts[flow_count % len(hosts)]
         flow_url = self.del_url_template % (host, node, flow_id)
-        # print flow_url
 
         if not self.auth:
             r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
         else:
-            r = session.delete(flow_url, headers=self.getheaders, auth=('admin', 'admin'), timeout=self.TIMEOUT)
+            r = session.delete(
+                flow_url,
+                headers=self.getheaders,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         return r.status_code
 
@@ -410,26 +458,35 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+            print(
+                "Thread %d: Deleting %d flows on %d nodes" % (tid, self.nflows, n_nodes)
+            )
 
         with Timer() as t:
             for flow in range(self.nflows):
-                flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
+                flow_id = (
+                    tid * (self.ncycles * self.nflows)
+                    + flow
+                    + start_flow
+                    + self.startflow
+                )
                 sts = self.delete_flow(s, self.flows[tid][flow_id], flow_id, flow)
                 try:
                     rqst_stats[sts] += 1
                 except KeyError:
                     rqst_stats[sts] = 1
 
-        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+            rqst_stats, rqst_stats, t.secs
+        )
 
         with self.print_lock:
-            print '\n    Thread %d results (DELETE): ' % tid
-            print '        Elapsed time: %.2fs,' % t.secs
-            print '        Requests/s:  %.2f OK,  %.2f Total' % (ok_rps, total_rps)
-            print '        Flows/s:     %.2f OK,  %.2f Total' % (ok_fps, total_fps)
-            print '        Stats ({Requests})',
-            print rqst_stats
+            print("\n    Thread %d results (DELETE): " % tid)
+            print("        Elapsed time: %.2fs," % t.secs)
+            print("        Requests/s:  %.2f OK,  %.2f Total" % (ok_rps, total_rps))
+            print("        Flows/s:     %.2f OK,  %.2f Total" % (ok_fps, total_fps))
+            print("        Stats ({Requests})")
+            print(rqst_stats)
             self.threads_done += 1
 
         s.close()
@@ -452,7 +509,7 @@ class FlowConfigBlaster(object):
         for c in range(self.ncycles):
             self.stats = self.FcbStats()
             with self.print_lock:
-                print '\nCycle %d:' % c
+                print("\nCycle %d:" % c)
 
             threads = []
             for i in range(self.nthreads):
@@ -466,20 +523,34 @@ class FlowConfigBlaster(object):
                     thread.join()
 
             with self.print_lock:
-                print '\n*** Test summary:'
-                print '    Elapsed time:    %.2fs' % t.secs
-                print '    Peak requests/s: %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
-                print '    Peak flows/s:    %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
-                print '    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
-                    self.stats.get_ok_rqsts() / t.secs,
-                    self.stats.get_total_rqsts() / t.secs,
-                    (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
-                print '    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)' % (
-                    self.stats.get_ok_flows() / t.secs,
-                    self.stats.get_total_flows() / t.secs,
-                    (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+                print("\n*** Test summary:")
+                print("    Elapsed time:    %.2fs" % t.secs)
+                print(
+                    "    Peak requests/s: %.2f OK, %.2f Total"
+                    % (self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
+                )
+                print(
+                    "    Peak flows/s:    %.2f OK, %.2f Total"
+                    % (self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
+                )
+                print(
+                    "    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)"
+                    % (
+                        self.stats.get_ok_rqsts() / t.secs,
+                        self.stats.get_total_rqsts() / t.secs,
+                        (self.stats.get_total_rqsts() / t.secs * 100)
+                        / self.stats.get_total_rqst_rate(),
+                    )
+                )
+                print(
+                    "    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)"
+                    % (
+                        self.stats.get_ok_flows() / t.secs,
+                        self.stats.get_total_flows() / t.secs,
+                        (self.stats.get_total_flows() / t.secs * 100)
+                        / self.stats.get_total_flow_rate(),
+                    )
+                )
 
                 self.total_ok_flows += self.stats.get_ok_flows()
                 self.total_ok_rqsts += self.stats.get_ok_rqsts()
@@ -498,7 +569,7 @@ class FlowConfigBlaster(object):
         return self.total_ok_rqsts
 
     def create_flow_name(self, flow_id):
-        return 'TestFlow-%d' % flow_id
+        return "TestFlow-%d" % flow_id
 
 
 def get_json_from_file(filename):
@@ -507,16 +578,21 @@ def get_json_from_file(filename):
     :param filename: File from which to get the template
     :return: The json flow template (string)
     """
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         try:
             ft = json.load(f)
-            keys = ft['flow'][0].keys()
-            if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
-                if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
-                    print 'File "%s" ok to use as flow template' % filename
+            keys = ft["flow"][0].keys()
+            if (
+                ("cookie" in keys)
+                and ("flow-name" in keys)
+                and ("id" in keys)
+                and ("match" in keys)
+            ):
+                if "ipv4-destination" in ft["flow"][0]["match"].keys():
+                    print('File "%s" ok to use as flow template' % filename)
                     return ft
         except ValueError:
-            print 'JSON parsing of file %s failed' % filename
+            print("JSON parsing of file %s failed" % filename)
             pass
 
     return None
@@ -529,7 +605,7 @@ def get_json_from_file(filename):
 # also beneficial to have unique "cookie" and "flow-name" attributes for easier
 # identification of the flow.
 ###############################################################################
-example_flow_mod_json = '''{
+example_flow_mod_json = """{
     "flow": [
         {
             "id": "38",
@@ -568,7 +644,7 @@ example_flow_mod_json = '''{
         }
 
     ]
-}'''
+}"""
 
 
 def create_arguments_parser():
@@ -576,47 +652,96 @@ def create_arguments_parser():
     Shorthand to arg parser on library level in order to access and eventually enhance in ancestors.
     :return: argument parser supporting config blaster arguments and parameters
     """
-    my_parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then'
-                                                    ' deletes flows into the config tree, as specified by'
-                                                    ' optional parameters.')
-
-    my_parser.add_argument('--host', default='127.0.0.1',
-                           help='Host where odl controller is running (default is 127.0.0.1).  '
-                                'Specify a comma-separated list of hosts to perform round-robin load-balancing.')
-    my_parser.add_argument('--port', default='8181',
-                           help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    my_parser.add_argument('--cycles', type=int, default=1,
-                           help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
-                                'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
-                                'ends when all threads finish. Another cycle is started when the previous cycle '
-                                'finished.')
-    my_parser.add_argument('--threads', type=int, default=1,
-                           help='Number of request worker threads to start in each cycle; default=1. '
-                                'Each thread will add/delete <FLOWS> flows.')
-    my_parser.add_argument('--flows', type=int, default=10,
-                           help='Number of flows that will be added/deleted by each worker thread in each cycle; '
-                                'default 10')
-    my_parser.add_argument('--fpr', type=int, default=1,
-                           help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
-                                'default 1')
-    my_parser.add_argument('--nodes', type=int, default=16,
-                           help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
-                                'flows will be evenly distributed (programmed) into connected nodes.')
-    my_parser.add_argument('--delay', type=int, default=0,
-                           help='Time (in seconds) to wait between the add and delete cycles; default=0')
-    my_parser.add_argument('--delete', dest='delete', action='store_true', default=True,
-                           help='Delete all added flows one by one, benchmark delete '
-                                'performance.')
-    my_parser.add_argument('--no-delete', dest='delete', action='store_false',
-                           help='Do not perform the delete cycle.')
-    my_parser.add_argument('--auth', dest='auth', action='store_true', default=False,
-                           help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
-                                'default: no authentication')
-    my_parser.add_argument('--startflow', type=int, default=0,
-                           help='The starting Flow ID; default=0')
-    my_parser.add_argument('--file', default='',
-                           help='File from which to read the JSON flow template; default: no file, use a built in '
-                                'template.')
+    my_parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then"
+        " deletes flows into the config tree, as specified by"
+        " optional parameters."
+    )
+
+    my_parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1).  "
+        "Specify a comma-separated list of hosts to perform round-robin load-balancing.",
+    )
+    my_parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    my_parser.add_argument(
+        "--cycles",
+        type=int,
+        default=1,
+        help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+        "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+        "ends when all threads finish. Another cycle is started when the previous cycle "
+        "finished.",
+    )
+    my_parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    my_parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+        "default 10",
+    )
+    my_parser.add_argument(
+        "--fpr",
+        type=int,
+        default=1,
+        help="Flows-per-Request - number of flows (batch size) sent in each HTTP request; "
+        "default 1",
+    )
+    my_parser.add_argument(
+        "--nodes",
+        type=int,
+        default=16,
+        help="Number of nodes if mininet is not connected; default=16. If mininet is connected, "
+        "flows will be evenly distributed (programmed) into connected nodes.",
+    )
+    my_parser.add_argument(
+        "--delay",
+        type=int,
+        default=0,
+        help="Time (in seconds) to wait between the add and delete cycles; default=0",
+    )
+    my_parser.add_argument(
+        "--delete",
+        dest="delete",
+        action="store_true",
+        default=True,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    my_parser.add_argument(
+        "--no-delete",
+        dest="delete",
+        action="store_false",
+        help="Do not perform the delete cycle.",
+    )
+    my_parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        default=False,
+        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
+        "default: no authentication",
+    )
+    my_parser.add_argument(
+        "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+    )
+    my_parser.add_argument(
+        "--file",
+        default="",
+        help="File from which to read the JSON flow template; default: no file, use a built in "
+        "template.",
+    )
     return my_parser
 
 
@@ -631,28 +756,39 @@ if __name__ == "__main__":
     parser = create_arguments_parser()
     in_args = parser.parse_args()
 
-    if in_args.file != '':
+    if in_args.file != "":
         flow_template = get_json_from_file(in_args.file)
     else:
         flow_template = None
 
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr, in_args.nodes,
-                            in_args.flows, in_args.startflow, in_args.auth)
+    fct = FlowConfigBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.fpr,
+        in_args.nodes,
+        in_args.flows,
+        in_args.startflow,
+        in_args.auth,
+    )
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+    print("\n*** Total flows added: %s" % fct.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fct.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        print(
+            "*** Waiting for %d seconds before the delete cycle ***\n" % in_args.delay
+        )
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
-        print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
+        print("\n*** Total flows deleted: %s" % fct.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fct.get_ok_rqsts())