Fix Flake8 errors 39/88739/4
author    Jamo Luhrsen <jluhrsen@gmail.com>
          Fri, 27 Mar 2020 00:22:06 +0000 (17:22 -0700)
committer Jamo Luhrsen <jluhrsen@gmail.com>
          Fri, 27 Mar 2020 03:20:45 +0000 (20:20 -0700)
flake8, now running with python3, was turning up a lot of
errors about print statements. This fixes some of those
errors, which seems to be enough to get flake8 passing
again.
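
The conversion pattern, as a minimal sketch (the python2 statement
form is a SyntaxError when flake8 parses a file with a python3
interpreter, reported as E999):

    # before (python2-only statement form; SyntaxError under python3):
    #     print "found " + str(3) + " nodes"
    # after (valid on python2.6+ and python3):
    print("found " + str(3) + " nodes")
    # note: print("found ", 3) would insert an extra separator space,
    # so concatenation preserves the original output exactly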

Signed-off-by: Jamo Luhrsen <jluhrsen@gmail.com>
Change-Id: Ie02f5266391702e425147b2981aad4bb828bdb0a
49 files changed:
csit/libraries/AAAJsonUtils.py
csit/libraries/AuthStandalone.py
csit/libraries/ClusterStateLibrary.py
csit/libraries/Counter.py
csit/libraries/CrudLibrary.py
csit/libraries/DynamicMininet.py
csit/libraries/JsonGenerator.py
csit/libraries/MininetTopo/create_fullymesh.py
csit/libraries/ScaleClient.py
csit/libraries/Topology.py
csit/libraries/Topologynew.py
csit/libraries/UtilLibrary.py
csit/libraries/VsctlListParser.py
csit/libraries/XmlComparator.py
csit/libraries/backuprestore/JsonDiffTool.py
csit/libraries/backuprestore/jsonpathl.py
csit/libraries/ipaddr.py
csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py
csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py
csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py
tools/clustering/cluster-debugging/transaction-tracking/process.py
tools/clustering/cluster-deployer/deploy.py
tools/clustering/cluster-deployer/remote_host.py
tools/fastbgp/play.py
tools/mdsal_benchmark/dsbenchmark.py
tools/mdsal_benchmark/ntfbenchmark.py
tools/mdsal_benchmark/rpcbenchmark.py
tools/netconf_tools/configurer.py
tools/netconf_tools/getter.py
tools/odl-lispflowmapping-performance-tests/mapping_blaster.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py
tools/odl-mdsal-clustering-tests/replace_cars.py
tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
tools/pcep_updater/updater.py
tools/wcbench/stats.py

csit/libraries/AAAJsonUtils.py
index 3db0088a54e01529ebd7153b7122d88c45eab17f..80391e35e9a05be6b826cc2398a07f9b695e33be 100644 (file)
@@ -29,7 +29,7 @@ def countnodes(args):
     try:
         jsonobj = json.loads(args['jsonblob'])
     except KeyError:
-        print "countnodes: json blob to parse not found"
+        print("countnodes: json blob to parse not found")
         raise
 
     if 'subnode' in args:
@@ -159,13 +159,13 @@ def get_id_by_name(args):
     try:
         jsonobj = json.loads(str(args['jsonblob']))
     except KeyError:
-        print "get_id_by_name: json blob not specified:"
+        print("get_id_by_name: json blob not specified:")
         raise
 
     try:
         name = args['name']
     except KeyError:
-        print "get_id_by_name: name [usr, domain, role] not specified in args"
+        print("get_id_by_name: name [usr, domain, role] not specified in args")
         raise
 
     if 'head' in args:
@@ -178,7 +178,7 @@ def get_id_by_name(args):
     try:
         datatype = args['typeval']
     except KeyError:
-        print "get_id_by_name: need a type arg to process correct name for id"
+        print("get_id_by_name: need a type arg to process correct name for id")
         raise
 
     try:
@@ -231,13 +231,13 @@ def get_attribute_by_id(args):
     try:
         jsonobj = json.loads(args['jsonblob'])
     except KeyError:
-        print "get_attribute_by_id: json blob not specified:"
+        print("get_attribute_by_id: json blob not specified:")
         raise
 
     try:
         nodeid = args['id']
     except KeyError:
-        print "get_attribute_by_id: id to look for not specified in parameters"
+        print("get_attribute_by_id: id to look for not specified in parameters")
         raise
 
     if 'attr' in args:
@@ -261,13 +261,13 @@ def get_attribute_by_id(args):
     try:
         datatype = args['typeval']
     except KeyError:
-        print "get_attribute_by_id: need type arg to process name for id"
+        print("get_attribute_by_id: need type arg to process name for id")
         raise
 
     try:
         size = args['size']
     except KeyError:
-        print "get_attribute_by_id: specify number of records we need"
+        print("get_attribute_by_id: specify number of records we need")
         raise
 
     typename = datatype + 'id'
csit/libraries/AuthStandalone.py
index 2435423bcecc7317974369fd6c39a4c3436f8ab5..1ba99c1f176b004aa2e47072881c547a2e8ab339 100644 (file)
@@ -152,7 +152,6 @@ class _TokenReusingSession(object):
             raise RuntimeError("Parse failed: " + resp.text)
         self.token = token
         # TODO: Use logging so that callers could see token refreshes.
-        # print "DEBUG: token:", token
         # We keep self.session to use for the following restconf requests.
 
     def oneshot_method(self, method, uri, **kwargs):
@@ -200,7 +199,6 @@ class _TokenClosingSession(object):
             raise RuntimeError("Parse failed: " + resp.text)
         self.token = token
         # TODO: Use logging so that callers could see token refreshes.
-        # print "DEBUG: token:", token
         # We keep self.session to use for the following restconf requests.
 
     def oneshot_method(self, method, uri, **kwargs):
csit/libraries/ClusterStateLibrary.py
index ab5a643bb29d97a7ffb88f1ca612e5ef93cecd7d..815e6e3dcf0fc4da557e491deb16b9c9ddbe0ee6 100644 (file)
@@ -22,18 +22,18 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
     for ip in ips:
         i = 1
         dict[ip] = None
-        print "numOfShards => " + str(numOfShards)
+        print("numOfShards => ", str(numOfShards))
         while i <= numOfShards:
             shardMemberName = "member-" + str(i) + "-" + shardName
             j = 1
-            print 'j => ' + str(j)
-            print 'numOfTries => ' + str(numOfTries)
+            print('j => ' + str(j))
+            print('numOfTries => ' + str(numOfTries))
             while int(j) <= int(numOfTries):
                 print("Try number " + str(j))
                 try:
                     print("getting role of " + ip + "  for shardName = " + shardMemberName)
                     url = SettingsLibrary.getJolokiaURL(ip, str(port), str(i), shardName)
-                    print url
+                    print(url)
                     resp = UtilLibrary.get(url)
                     print(resp)
                     if resp.status_code != 200:
@@ -92,7 +92,7 @@ def getFollowers(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSe
         for ip in dict.keys():
             if dict[ip] == 'Follower':
                 result.append(ip)
-        print "i=", i, "result=", result
+        print("i=%s result=%s" % (i, result))
         if (len(result) == (len(ips) - 1)):
             break
         sleep(1)
@@ -116,7 +116,7 @@ def testGetClusterRoles():
 def testGetLeader():
     leader = getLeader("shard-inventory-config", 3, 1, 1, 8181,
                        "10.194.126.116", "10.194.126.117", "10.194.126.118")
-    print leader
+    print(leader)
     return leader
 
 
csit/libraries/Counter.py
index 67400204035cf9e14b66fe71de09ad7faca11f16..4f327ee3fd1ce7d053ff5d3bbf40498c6ace52bc 100644 (file)
@@ -187,4 +187,4 @@ class Counter(dict):
 
 if __name__ == '__main__':
     import doctest
-    print doctest.testmod()
+    print(doctest.testmod())
csit/libraries/CrudLibrary.py
index e92bf56116f2f7dd2fe8d8c64e94547b97a7a84e..d5979ea5853dce6731bbecfd7856dcf243015a4f 100644 (file)
@@ -142,7 +142,7 @@ def buyCar(hostname, port, numberOfCarBuyers, start=0):
     </note>
     """
 
-    print "Buying " + str(numberOfCarBuyers) + " Cars"
+    print("Buying " + str(numberOfCarBuyers) + " Cars")
     for x in range(start, start + numberOfCarBuyers):
         strId = str(x + 1)
 
csit/libraries/DynamicMininet.py
index 6e274c6da6d6e65ea36e747ae9d1bf136bce6c69..8be374e4ff92466e1f88eb336ab62e46ac1cce10 100644 (file)
@@ -53,7 +53,7 @@ class DynamicMininet(cmd.Cmd):
             :param num: initial number of switches in the topology
         """
         if self._running:
-            print 'Mininet topology is already active'
+            print('Mininet topology is already active')
             return
         cntl, numsw = line.split()
         self._topo = mininet.topo.Topo()
@@ -68,10 +68,10 @@ class DynamicMininet(cmd.Cmd):
 
     def help_start(self):
         """Provide help message for start command"""
-        print 'Starts mininet'
-        print 'Usage: start <controller_ip> <num>'
-        print '\tcontroller_ip - controllers ip or host name'
-        print '\tnum           - number of switches at start'
+        print('Starts mininet')
+        print('Usage: start <controller_ip> <num>')
+        print('\tcontroller_ip - controllers ip or host name')
+        print('\tnum           - number of switches at start')
 
     def do_start_with_cluster(self, line):
         """Starts mininet network with initial number of switches
@@ -81,7 +81,7 @@ class DynamicMininet(cmd.Cmd):
                                    e.g.  1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
         """
         if self._running:
-            print 'Mininet topology is already active'
+            print('Mininet topology is already active')
             return
         cntls = line.split(',')
 
@@ -93,7 +93,7 @@ class DynamicMininet(cmd.Cmd):
         for i, cntl_ip in enumerate(cntls):
             cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
             controllers.append(cnt)
-            print "contrller {0} created".format(cnt)
+            print("contrller {0} created".format(cnt))
 
         self._net.buildFromTopo(topo=self._topo)
         self._net.start()
@@ -101,9 +101,9 @@ class DynamicMininet(cmd.Cmd):
 
     def help_start_with_cluster(self):
         """Provide help message for start_with_cluster command"""
-        print 'Starts mininet with one switch'
-        print 'Usage: start <controller_ips>'
-        print '\tcontroller_ips - comma separated list of controllers ip or host names'
+        print('Starts mininet with one switch')
+        print('Usage: start <controller_ips>')
+        print('\tcontroller_ips - comma separated list of controllers ip or host names')
 
     def do_start_switches_with_cluster(self, line):
         """Starts mininet network with initial number of switches
@@ -114,7 +114,7 @@ class DynamicMininet(cmd.Cmd):
                                    e.g.  1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
         """
         if self._running:
-            print 'Mininet topology is already active'
+            print('Mininet topology is already active')
             return
         num, contls = line.split()
         cntls = contls.split(',')
@@ -127,7 +127,7 @@ class DynamicMininet(cmd.Cmd):
         for i, cntl_ip in enumerate(cntls):
             cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
             controllers.append(cnt)
-            print "contrller {0} created".format(cnt)
+            print("contrller {0} created".format(cnt))
 
         self._net.buildFromTopo(topo=self._topo)
         self._net.start()
@@ -135,10 +135,10 @@ class DynamicMininet(cmd.Cmd):
 
     def help_start_switches_with_cluster(self):
         """Provide help message for start_with_cluster command"""
-        print 'Starts mininet with one switch'
-        print 'Usage: start <swnr> <controller_ips>'
-        print '\tswnt - number of switches in topology'
-        print '\tcontroller_ips - comma separated list of controllers ip or host names'
+        print('Starts mininet with one switch')
+        print('Usage: start <swnr> <controller_ips>')
+        print('\tswnr - number of switches in topology')
+        print('\tcontroller_ips - comma separated list of controllers ip or host names')
 
     def do_add_switch(self, line):
         """Adds one switch to the network
@@ -157,8 +157,8 @@ class DynamicMininet(cmd.Cmd):
 
     def help_add_switch(self):
         """Provide help message for add_switch command"""
-        print 'Adds one sinle switch to the running topology'
-        print 'Usage: add_switch'
+        print('Adds one single switch to the running topology')
+        print('Usage: add_switch')
 
     def do_add_switches(self, line):
         """Adds switches to the network
@@ -170,9 +170,9 @@ class DynamicMininet(cmd.Cmd):
 
     def help_add_switches(self):
         """Provide help message for add_switch command"""
-        print 'Adds one sinle switch to the running topology'
-        print 'Usage: add_switches <num>'
-        print '\tnum - number of switches tp be added'
+        print('Adds one single switch to the running topology')
+        print('Usage: add_switches <num>')
+        print('\tnum - number of switches to be added')
 
     def do_exit(self, line):
         """Stops mininet"""
@@ -183,8 +183,8 @@ class DynamicMininet(cmd.Cmd):
 
     def help_exit(self):
         """Provide help message for exit command"""
-        print 'Exit mininet cli'
-        print 'Usage: exit'
+        print('Exit mininet cli')
+        print('Usage: exit')
 
     def do_sh(self, line):
         """Run an external shell command
@@ -195,9 +195,9 @@ class DynamicMininet(cmd.Cmd):
 
     def help_sh(self, line):
         """Provide help message for sh command"""
-        print 'Executes given commandAdds one sinle switch to the running topology'
-        print 'Usage: sh <line>'
-        print '\tline - command to be executed(e.g. ps -e'
+        print('Executes given command')
+        print('Usage: sh <line>')
+        print('\tline - command to be executed (e.g. ps -e)')
 
     def emptyline(self):
         pass
csit/libraries/JsonGenerator.py
index 4722b614392ba89636114ad2763b663d300e724c..94caad67bcdf70cf4f6f16be91e98f5a5ccf36d0 100644 (file)
@@ -64,7 +64,7 @@ def copy_eid(objA, objB):
             try:
                 setattr(objA, name, value)
             except AttributeError:
-                print name, "giving attribute error in", objA
+                print("%s giving attribute error in %s" % (name, objA))
 
 
 def copy_rloc(objA, objB):
@@ -80,7 +80,7 @@ def copy_rloc(objA, objB):
             try:
                 setattr(objA, name, value)
             except AttributeError:
-                print name, "giving attribute error in", objA
+                print(" %s giving attribute error in" % (name, objA))
 
 
 def clean_hops(obj):
@@ -262,7 +262,7 @@ def Get_LispAddress_JSON_And_Wrap_input(eid_string, vni=None):
     return Wrap_input(Get_LispAddress_JSON(eid_string, vni))
 
 
-def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"):
+def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
     """ Description: Returns locator record object from pyangbind generated classes
         Returns: locator record object
         Params:
@@ -290,7 +290,7 @@ def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"
     return lrecord_obj
 
 
-def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"):
+def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
     """ Description: Returns locator record dictionary
         Returns: python dictionary
         Params:
@@ -330,7 +330,7 @@ def Get_MappingRecord_Object(eid, locators, ttl=1440, authoritative=True, action
         loc_id = loc.keys()[0]
         loc_obj = loc[loc_id]
         if loc_id in loc_ids:
-            print "Locator objects should have different keys"
+            print("Locator objects should have different keys")
             break
         # TODO: Locator-id, currently in the format of loc_id0, loc_id1
         mrecord_obj.LocatorRecord.add(loc_id)
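
Note on the flags default above: python3 dropped the bare
leading-zero octal notation (PEP 3127), so 001 is a SyntaxError and
must be spelled 0o01. A minimal sketch of the equivalence:

    # python2 read a bare leading zero as octal: 001, 0755
    # python3 requires the 0o prefix; the value is unchanged
    assert 0o01 == 1
    assert 0o755 == 493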
csit/libraries/MininetTopo/create_fullymesh.py
index b1191a17464b3060e73daa157efa526be0b00d77..9019ab1adc7fda782d0ea6cfc176dcd8d4c46040 100644 (file)
@@ -24,10 +24,10 @@ __created__ = "19 March 2014"
 
 if len(sys.argv) < 5:
     print("Please povide correct inputs. Exiting!!!")
-    print "{0}  <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
-          <base_ip: Eg:75.75.0.0>".format(sys.argv[0].split('/')[-1])
-    print "Dpid of switches is derived from base mac and \
-           host ip address is derived from base ip"
+    print("{0}  <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
+          <base_ip: Eg:75.75.0.0>".format(sys.argv[0].split('/')[-1]))
+    print("Dpid of switches is derived from base mac and \
+           host ip address is derived from base ip")
     sys.exit(1)
 
 switch_count = int(sys.argv[1])
@@ -99,8 +99,8 @@ if __name__ == "__main__":
     \nHence generating this python file dynamically\"\"\"     \
     \nfrom mininet.topo import Topo\nclass DemoTopo(Topo):          \
     \n'.format(switch_count, switch_count * host_per_switch, sys.argv[0]))
-    print "This topology has %d switches %d hosts" \
-          % (switch_count, switch_count * host_per_switch)
+    print("This topology has %d switches %d hosts"
+          % (switch_count, switch_count * host_per_switch))
     configfile.write("    def __init__(self):\n ")
     configfile.write("        #  Initialize topology\n")
     configfile.write("        Topo.__init__(self)\n")
csit/libraries/ScaleClient.py
index fd9f39f1eed7fad5ffd009872de310bedf220f23..10ec961d6e009c7a2898f6d69235b37adb2c2928 100644 (file)
@@ -647,7 +647,6 @@ def flow_stats_collected(controller=''):
     Returns:
         :returns (switches, flows_reported, flows-found): tupple with counts of switches, reported and found flows
     """
-    # print type(flow_details), flow_details
     active_flows = 0
     found_flows = 0
     switches = _get_operational_inventory_of_switches(controller)
@@ -659,7 +658,7 @@ def flow_stats_collected(controller=''):
             active_flows += t['opendaylight-flow-table-statistics:flow-table-statistics']['active-flows']
             if 'flow' in t:
                 found_flows += len(t['flow'])
-    print "Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows
+    print("Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows)
     return len(switches), active_flows, found_flows
 
 
csit/libraries/Topology.py
index 4c2b066004a58633597f58255370fd5a17fe6663..b96e48e364235f5120c1ca66738db92bf6d3f859 100644 (file)
@@ -43,5 +43,5 @@ class Topology(object):
 
 if __name__ == '__main__':
     topology = Topology()
-    print topology.get_nodes_from_topology(2)
-    print topology.get_nodes_from_topology('2')
+    print(topology.get_nodes_from_topology(2))
+    print(topology.get_nodes_from_topology('2'))
csit/libraries/Topologynew.py
index 80c83f0a8a80e1239dd456d95c5bb115649eea2d..a62c429023550146daa2eed6a76b5f72955dd0ad 100644 (file)
@@ -102,7 +102,4 @@ class Topologynew(object):
 
 if __name__ == '__main__':
     topologynew = Topologynew()
-    # print topologynew.get_nodes_from_tree_topo(2)
-    # print topologynew.get_nodes_from_tree_topo('2')
-    print topologynew.get_nodes_from_tree_topo('(2,3)')
-    # print topologynew.get_ids_of_leaf_nodes(2,2 )#, depth)
+    print(topologynew.get_nodes_from_tree_topo('(2,3)'))
csit/libraries/UtilLibrary.py
index e244c334956332de5fd624dc749c52ffe16e8654..5e0ec10941d4db0bd22752a06118013d896fed3c 100644 (file)
@@ -112,13 +112,13 @@ def execute_ssh_command(ip, username, password, command):
     use username and password of controller server for ssh and need
     karaf distribution location like /root/Documents/dist
     """
-    print "executing ssh command"
+    print("executing ssh command")
     lib = SSHLibrary()
     lib.open_connection(ip)
     lib.login(username=username, password=password)
-    print "login done"
+    print("login done")
     cmd_response = lib.execute_command(command)
-    print "command executed : " + command
+    print("command executed : " + command)
     lib.close_connection()
     return cmd_response
 
@@ -127,22 +127,22 @@ def wait_for_controller_up(ip, port="8181"):
     url = "http://" + ip + ":" + str(port) + \
           "/restconf/config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules"
 
-    print "Waiting for controller " + ip + " up."
+    print("Waiting for controller " + ip + " up.")
     # Try 30*10s=5 minutes for the controller to be up.
     for i in xrange(30):
         try:
-            print "attempt " + str(i) + " to url " + url
+            print("attempt %s to url %s" % (str(i), url))
             resp = get(url, "admin", "admin")
-            print "attempt " + str(i) + " response is " + str(resp)
-            print resp.text
+            print("attempt %s response is %s" % (str(i), str(resp)))
+            print(resp.text)
             if ('clustering-it-provider' in resp.text):
-                print "Wait for controller " + ip + " succeeded"
+                print("Wait for controller " + ip + " succeeded")
                 return True
         except Exception as e:
-            print e
+            print(e)
         time.sleep(10)
 
-    print "Wait for controller " + ip + " failed"
+    print("Wait for controller " + ip + " failed")
     return False
 
 
@@ -192,7 +192,6 @@ def wait_for_controller_stopped(ip, username, password, karafHome):
     i = 1
     while i <= tries:
         stdout = lib.execute_command("ps -axf | grep karaf | grep -v grep | wc -l")
-        # print "stdout: "+stdout
         processCnt = stdout[0].strip('\n')
         print("processCnt: " + processCnt)
         if processCnt == '0':
@@ -203,7 +202,7 @@ def wait_for_controller_stopped(ip, username, password, karafHome):
     lib.close_connection()
 
     if i > tries:
-        print "Killing controller"
+        print("Killing controller")
         kill_controller(ip, username, password, karafHome)
 
 
@@ -234,7 +233,7 @@ def isolate_controller(controllers, username, password, isolated):
             cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP'
             execute_ssh_command(isolated_controller, username, password, cmd_str)
     ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L')
-    print ip_tables
+    print(ip_tables)
     iso_result = 'pass'
     for controller in controllers:
         controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
@@ -266,7 +265,7 @@ def rejoin_controller(controllers, username, password, isolated):
             cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP'
             execute_ssh_command(isolated_controller, username, password, cmd_str)
     ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L')
-    print ip_tables
+    print(ip_tables)
     iso_result = 'pass'
     for controller in controllers:
         controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
@@ -290,18 +289,18 @@ def flush_iptables(controllers, username, password):
     """
     flush_result = 'pass'
     for controller in controllers:
-        print 'Flushing ' + controller
+        print('Flushing ' + controller)
         cmd_str = 'sudo iptables -v -F'
         cmd_result = execute_ssh_command(controller, username, password, cmd_str)
-        print cmd_result
+        print(cmd_result)
         success_string = "Flushing chain `INPUT'" + "\n"
         success_string += "Flushing chain `FORWARD'" + "\n"
         success_string += "Flushing chain `OUTPUT'"
         if not cmd_result == success_string:
             flush_result = "Failed to flush IPTables. Check Log."
-        print "."
-        print "."
-        print "."
+        print(".")
+        print(".")
+        print(".")
     return flush_result
 
 
csit/libraries/VsctlListParser.py
index e68486dea57ee7266ff4273ebf980c53d6c93e98..531d23e1b039296bfc35ad8c6646bd1a26202553 100644 (file)
@@ -22,7 +22,7 @@ def _parse_stdout(stdout):
     regroups = re.finditer(pat, text)
     outdict = {}
     for g in regroups:
-        print g.group()
+        print(g.group())
         if g.group('key') == '_uuid':
             cntl_uuid = g.group('value')
             outdict[cntl_uuid] = {}
csit/libraries/XmlComparator.py
index 8ad386e2e533d6b4feda304b85861ac502a59b05..7c2617539ccf08e375055e2d52bc11204f7ab870 100644 (file)
@@ -218,8 +218,6 @@ class XmlComparator:
             nodeDict = XMLtoDictParserTools.parseTreeToDict(node)
             XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
             index += 1
-            # print nodeDict
-            # print origDict
             if nodeDict == origDict:
                 return True, ''
             if nodeDict['flow']['priority'] == origDict['flow']['priority']:
@@ -229,7 +227,6 @@ class XmlComparator:
 
     def is_flow_operational2(self, requested_flow, oper_resp, check_id=False):
         def _rem_unimplemented_tags(tagpath, recurs, tdict):
-            # print "_rem_unimplemented_tags", tagpath, tdict
             if len(tagpath) > 1 and tagpath[0] in tdict:
                 _rem_unimplemented_tags(tagpath[1:], recurs, tdict[tagpath[0]])
 
@@ -246,11 +243,9 @@ class XmlComparator:
                 del tdict[tagpath[0]]
             if tdict.keys() == ['order']:
                 del tdict['order']
-            # print "leaving", tdict
 
         def _add_tags(tagpath, newtag, value, tdict):
             '''if whole tagpath exists and the tag is not present, it is added with given value'''
-            # print "_add_tags", tagpath, newtag, value, tdict
             if len(tagpath) > 0 and tagpath[0] in tdict:
                 _add_tags(tagpath[1:], newtag, value, tdict[tagpath[0]])
             elif len(tagpath) == 0 and newtag not in tdict:
@@ -258,7 +253,6 @@ class XmlComparator:
 
         def _to_be_modified_tags(tagpath, tag, related_tag, tdict):
             '''if whole tagpath exists and the tag is not present, it is added with given value'''
-            # print "_to_be_modified_tags", tagpath, tag, related_tag, tdict
             if len(tagpath) > 0 and tagpath[0] in tdict:
                 _to_be_modified_tags(tagpath[1:], tag, related_tag, tdict[tagpath[0]])
             elif len(tagpath) == 0 and tag in tdict and related_tag in tdict:
@@ -284,9 +278,6 @@ class XmlComparator:
                 ignoreList=IGNORED_TAGS_LIST)
             XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
             index += 1
-            # print nodeDict
-            # print origDict
-            # print reportDict
             if nodeDict == origDict:
                 return True, ''
             if nodeDict['flow']['priority'] == origDict['flow']['priority']:
@@ -298,8 +289,6 @@ class XmlComparator:
                     for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC:
                         _to_be_modified_tags(p, t, rt, td)
 
-                    # print "comparing1", nodeDict
-                    # print "comparing2", td
                     if nodeDict == td:
                         return True, ''
                 if nodeDict == origDict:
csit/libraries/backuprestore/JsonDiffTool.py
index 0d24764bca62e53a32ef606ddad7d585bde10dc8..b60126ac8e532371ab77a3de41b18eb85c7ffc30 100644 (file)
@@ -141,9 +141,9 @@ def prefilter_json_files_then_compare(args):
 
     if args.printDifferences:
         for patchline in differences_after_patching:
-            print json.dumps(patchline)
+            print(json.dumps(patchline))
 
-    print len(differences_after_patching)
+    print(len(differences_after_patching))
     return len(differences_after_patching)
 
 
csit/libraries/backuprestore/jsonpathl.py
index bf84e8090e3ca740e85078bd1d02e2819305660c..76ebf8a9439322e55aaf2840898fdbcc717abd98 100644 (file)
@@ -71,7 +71,6 @@ def normalize(x):
         g1 = m.group(1)
         subx.append(g1)
         ret = "[#%d]" % n
-        #       print "f1:", g1, ret
         return ret
 
     x = re.sub(r"[\['](\??\(.*?\))[\]']", f1, x)
@@ -86,7 +85,6 @@ def normalize(x):
     # put expressions back
     def f2(m):
         g1 = m.group(1)
-        #       print "f2:", g1
         return subx[int(g1)]
 
     x = re.sub(r"#([0-9]+)", f2, x)
@@ -130,17 +128,17 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
 
     def trace(expr, obj, path):
         if debug:
-            print "trace", expr, "/", path
+            print("trace", expr, "/", path)
         if expr:
             x = expr.split(';')
             loc = x[0]
             x = ';'.join(x[1:])
             if debug:
-                print "\t", loc, type(obj)
+                print("\t", loc, type(obj))
             if loc == "*":
                 def f03(key, loc, expr, obj, path):
                     if debug > 1:
-                        print "\tf03", key, loc, expr, path
+                        print("\tf03", key, loc, expr, path)
                     trace(s(key, expr), obj, path)
 
                 walk(loc, x, obj, path, f03)
@@ -149,7 +147,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
 
                 def f04(key, loc, expr, obj, path):
                     if debug > 1:
-                        print "\tf04", key, loc, expr, path
+                        print("\tf04", key, loc, expr, path)
                     if isinstance(obj, dict):
                         if key in obj:
                             trace(s('..', expr), obj[key], s(path, key))
@@ -175,7 +173,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                 # [(index_expression)]
                 if loc.startswith("(") and loc.endswith(")"):
                     if debug > 1:
-                        print "index", loc
+                        print("index", loc)
                     e = evalx(loc, obj)
                     trace(s(e, x), obj, path)
                     return
@@ -183,11 +181,11 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                 # ?(filter_expression)
                 if loc.startswith("?(") and loc.endswith(")"):
                     if debug > 1:
-                        print "filter", loc
+                        print("filter", loc)
 
                     def f05(key, loc, expr, obj, path):
                         if debug > 1:
-                            print "f05", key, loc, expr, path
+                            print("f05", key, loc, expr, path)
                         if isinstance(obj, dict):
                             eval_result = evalx(loc, obj[key])
                         else:
@@ -240,7 +238,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                     # [index,index....]
                     for piece in re.split(r"'?,'?", loc):
                         if debug > 1:
-                            print "piece", piece
+                            print("piece", piece)
                         trace(s(piece, x), obj, path)
         else:
             store(path, obj)
@@ -257,7 +255,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
         """eval expression"""
 
         if debug:
-            print "evalx", loc
+            print("evalx", loc)
 
         # a nod to JavaScript. doesn't work for @.name.name.length
         # Write len(@.name.name) instead!!!
@@ -299,20 +297,20 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
         loc = re.sub(r'(?<!\\)@', "__obj", loc).replace(r'\@', '@')
         if not use_eval:
             if debug:
-                print "eval disabled"
+                print("eval disabled")
             raise Exception("eval disabled")
         if debug:
-            print "eval", loc
+            print("eval", loc)
         try:
             # eval w/ caller globals, w/ local "__obj"!
             v = eval(loc, caller_globals, {'__obj': obj})
-        except Exception, e:
+        except Exception as e:
             if debug:
-                print e
-            return False
+                print(e)
+            return False
 
         if debug:
-            print "->", v
+            print("->", v)
         return v
 
     # body of jsonpath()
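
Note on the except clause above: python3 only accepts the "as"
spelling, and the early return must stay indented inside the handler
so the success path still reaches "return v". A minimal sketch:

    def evalx_sketch():
        try:
            return 1 / 0
        except Exception as e:  # py2's "except Exception, e:" fails under py3
            print(e)
            return False        # must stay inside the handler

    assert evalx_sketch() is False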
csit/libraries/ipaddr.py
index 7208218683e16875018dc23094e99037f9085f85..59a5e89cb7002d176e8c104d7a218aa30620ad9d 100644 (file)
@@ -1461,7 +1461,7 @@ class _BaseV6(object):
 
         try:
             # Now, parse the hextets into a 128-bit integer.
-            ip_int = 0L
+            ip_int = 0
             for i in xrange(parts_hi):
                 ip_int <<= 16
                 ip_int |= self._parse_hextet(parts[i])
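
Note on the 0L literal above: python3 merged int and long into a
single arbitrary-precision int, so the suffix is gone and a plain 0
accumulates 128-bit values just as well. A minimal sketch:

    # python2 had a separate long type spelled with an L suffix;
    # python3 ints are arbitrary precision, so plain 0 suffices
    ip_int = 0
    for hextet in (0x2001, 0x0db8):
        ip_int = (ip_int << 16) | hextet
    assert ip_int == 0x20010db8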
csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py
index 5a61ffd289f4fa2e5db14c0f4078eab019c11ce8..7bcf6709de2b61d3369b706786bfde2f8b5c7c3f 100644 (file)
@@ -12,4 +12,4 @@ def call_dpctl():
 if __name__ == "__main__":
     flows = call_dpctl().split("recirc_id")
     for flow in flows:
-        print flow
+        print(flow)
csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py
index 4a4bd866e1769f4bb6205cf8e1b24611df02fb37..bd7c6aeb01f401ef75970d6fa6aa6dd4da2f9276 100644 (file)
@@ -42,7 +42,7 @@ def add_controller(sw, ip):
     try:
         socket.inet_aton(ip)
     except socket.error:
-        print "Error: %s is not a valid IPv4 address of controller!" % ip
+        print("Error: %s is not a valid IPv4 address of controller!" % (ip))
         os.exit(2)
 
     call(['sudo', 'ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip])
@@ -61,7 +61,7 @@ def add_manager(ip):
     try:
         socket.inet_aton(ip)
     except socket.error:
-        print "Error: %s is not a valid IPv4 address of manager!" % ip
+        print("Error: %s is not a valid IPv4 address of manager!" % (ip))
         os.exit(2)
 
     cmd = ['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % ip]
@@ -85,7 +85,7 @@ def add_switch(name, dpid=None):
             # prepending zeros to match 16-byt length, e.g. 123 -> 0000000000000123
             dpid = filler[:len(filler) - len(dpid)] + dpid
         elif len(dpid) > 16:
-            print 'DPID: %s is too long' % dpid
+            print('DPID: %s is too long' % dpid)
             sys.exit(3)
         call(['sudo', 'ovs-vsctl', 'set', 'bridge', name,
               'other-config:datapath-id=%s' % dpid])
@@ -265,57 +265,57 @@ def launch(switches, hosts, odl_ip='127.0.0.1'):
                 connect_container_to_switch(
                     sw['name'], host, containerID)
                 host['port-name'] = 'vethl-' + host['name']
-                print "Created container: %s with IP: %s. Connect using docker attach %s," \
-                    "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name'])
+                print("Created container: %s with IP: %s. Connect using docker attach %s,"
+                      "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name']))
 
 
 if __name__ == "__main__":
     if len(sys.argv) < 2 or len(sys.argv) > 3:
-        print "Please, specify IP of ODL and switch index in arguments."
-        print "usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX"
+        print("Please, specify IP of ODL and switch index in arguments.")
+        print("usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX")
         sys.exit(2)
 
     controller = sys.argv[1]
     try:
         socket.inet_aton(controller)
     except socket.error:
-        print "Error: %s is not a valid IPv4 address!" % controller
+        print("Error: %s is not a valid IPv4 address!" % (controller))
         sys.exit(2)
 
     sw_index = int(sys.argv[2])
-    print sw_index
-    print switches[sw_index]
+    print(sw_index)
+    print(switches[sw_index])
     if sw_index not in range(0, len(switches) + 1):
-        print len(switches) + 1
-        print "Error: %s is not a valid switch index!" % sw_index
+        print(len(switches) + 1)
+        print("Error: %s is not a valid switch index!" % (sw_index))
         sys.exit(2)
 
     sw_type = switches[sw_index]['type']
     sw_name = switches[sw_index]['name']
     if sw_type == 'gbp':
-        print "*****************************"
-        print "Configuring %s as a GBP node." % sw_name
-        print "*****************************"
+        print("*****************************")
+        print("Configuring %s as a GBP node." % (sw_name))
+        print("*****************************")
         print
         launch([switches[sw_index]], hosts, controller)
-        print "*****************************"
-        print "OVS status:"
-        print "-----------"
+        print("*****************************")
+        print("OVS status:")
+        print("-----------")
         print
         call(['sudo', 'ovs-vsctl', 'show'])
         print
-        print "Docker containers:"
-        print "------------------"
+        print("Docker containers:")
+        print("------------------")
         call(['docker', 'ps'])
-        print "*****************************"
+        print("*****************************")
     elif sw_type == 'sff':
-        print "*****************************"
-        print "Configuring %s as an SFF." % sw_name
-        print "*****************************"
+        print("*****************************")
+        print("Configuring %s as an SFF." % (sw_name))
+        print("*****************************")
         call(['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % controller])
         print
     elif sw_type == 'sf':
-        print "*****************************"
-        print "Configuring %s as an SF." % sw_name
-        print "*****************************"
+        print("*****************************")
+        print("Configuring %s as an SF." % (sw_name))
+        print("*****************************")
         call(['%s/sf-config.sh' % os.path.dirname(os.path.realpath(__file__)), '%s' % sw_name])
csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py
index e2386d08b5aff60b509f5c6fbf24fae58af98908..1234c784585682778e87118701efb9dcd1ac1729 100644 (file)
@@ -54,8 +54,8 @@ class LacpTopo(Topo):
     net.build()
     s1.start([c0])
     s1.cmd('sudo ovs-vsctl set bridge s1 protocols=OpenFlow13')
-    print h1.cmd('./h1-bond0.sh')
-    print h2.cmd('./h2-bond0.sh')
+    print(h1.cmd('./h1-bond0.sh'))
+    print(h2.cmd('./h2-bond0.sh'))
     CLI(net)
     net.stop()
 
tools/clustering/cluster-debugging/transaction-tracking/process.py
index d69a3cc16fc03e080bae8bb223eade6ba8f4c978..92678d14b12ad71cf24189da6497add1740bcbad 100644 (file)
@@ -91,14 +91,14 @@ def filterTransactionsByTimeToComplete(timeToComplete):
     totalTime = 0
     for txn in txns:
         if txns[txn].totalTime() > timeToComplete:
-            print txns[txn]
+            print(txns[txn])
             totalTime += txns[txn].totalTime()
 
-    print "Total time for these transactions = " + unicode(totalTime)
+    print("Total time for these transactions = ", unicode(totalTime))
 
 
 def csv():
     txns = processFiles()
-    print Transaction.csv_header()
+    print(Transaction.csv_header())
     for txn in txns:
-        print txns[txn].csv()
+        print(txns[txn].csv())
tools/clustering/cluster-deployer/deploy.py
index a9819e0cfc730f21fb1647ecd68e2b428cea5103..401dcb630c9fd20cc3970c92a9fad432a39a0504 100755 (executable)
@@ -155,11 +155,11 @@ class Deployer:
                                      distribution_name)  # noqa
 
         if distribution_ver is None:
-            print distribution_name + " is not a valid distribution version." \
-                                      " (Must contain version in the form: " \
-                                      "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>." \
-                                      "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>" \
-                                      "-<name>-RC<#>\", e.g. 0.2.0-SNAPSHOT)"
+            print("%s is not a valid distribution version."
+                  " (Must contain version in the form: "
+                  "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>."
+                  "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>"
+                  "-<name>-RC<#>\", e.g. 0.2.0-SNAPSHOT)" % distribution_name)
             sys.exit(1)
         distribution_ver = distribution_ver.group()
 
@@ -206,35 +206,35 @@ class Deployer:
         # Copy the distribution to the host and unzip it
         odl_file_path = self.dir_name + "/odl.zip"
         self.remote.copy_file(self.distribution, odl_file_path)
-        self.remote.exec_cmd("unzip -o " + odl_file_path + " -d " +
-                             self.dir_name + "/")
+        self.remote.exec_cmd("unzip -o " + odl_file_path + " -d "
+                             + self.dir_name + "/")
 
         # Rename the distribution directory to odl
-        self.remote.exec_cmd("mv " + self.dir_name + "/" +
-                             distribution_name + " " + self.dir_name + "/odl")
+        self.remote.exec_cmd("mv " + self.dir_name + "/"
+                             + distribution_name + " " + self.dir_name + "/odl")
 
         # Copy all the generated files to the server
-        self.remote.mkdir(self.dir_name +
-                          "/odl/configuration/initial")
-        self.remote.copy_file(akka_conf, self.dir_name +
-                              "/odl/configuration/initial/")
-        self.remote.copy_file(module_shards_conf, self.dir_name +
-                              "/odl/configuration/initial/")
-        self.remote.copy_file(modules_conf, self.dir_name +
-                              "/odl/configuration/initial/")
-        self.remote.copy_file(features_cfg, self.dir_name +
-                              "/odl/etc/")
-        self.remote.copy_file(jolokia_xml, self.dir_name +
-                              "/odl/deploy/")
-        self.remote.copy_file(management_cfg, self.dir_name +
-                              "/odl/etc/")
+        self.remote.mkdir(self.dir_name
+                          + "/odl/configuration/initial")
+        self.remote.copy_file(akka_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(module_shards_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(modules_conf, self.dir_name
+                              + "/odl/configuration/initial/")
+        self.remote.copy_file(features_cfg, self.dir_name
+                              + "/odl/etc/")
+        self.remote.copy_file(jolokia_xml, self.dir_name
+                              + "/odl/deploy/")
+        self.remote.copy_file(management_cfg, self.dir_name
+                              + "/odl/etc/")
 
         if datastore_cfg is not None:
             self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/")
 
         # Add symlink
-        self.remote.exec_cmd("ln -sfn " + self.dir_name + " " +
-                             args.rootdir + "/deploy/current")
+        self.remote.exec_cmd("ln -sfn " + self.dir_name + " "
+                             + args.rootdir + "/deploy/current")
 
         # Run karaf
         self.remote.start_controller(self.dir_name)
@@ -243,11 +243,11 @@ class Deployer:
 def main():
     # Validate some input
     if os.path.exists(args.distribution) is False:
-        print args.distribution + " is not a valid file"
+        print("%s is not a valid file" % args.distribution)
         sys.exit(1)
 
     if os.path.exists(os.getcwd() + "/templates/" + args.template) is False:
-        print args.template + " is not a valid template"
+        print("%s is not a valid template" % args.template)
 
     # Prepare some 'global' variables
     hosts = args.hosts.split(",")
@@ -260,10 +260,10 @@ def main():
     replicas = {}
 
     for x in range(0, len(hosts)):
-        ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" +
-                             hosts[x] + ":2550")
-        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" +
-                              hosts[x] + ":2551")
+        ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@"
+                             + hosts[x] + ":2550")
+        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@"
+                              + hosts[x] + ":2551")
         all_replicas.append("member-" + str(x + 1))
 
     for x in range(0, 10):
tools/clustering/cluster-deployer/remote_host.py
index cb5d1818dcc2dc8fd0e252d7158e3447270e31e5..c85df36f866a999ecc2b96798804999baad84072 100644 (file)
@@ -23,7 +23,7 @@ class RemoteHost:
         self.lib.close_connection()
 
     def exec_cmd(self, command):
-        print "Executing command " + command + " on host " + self.host
+        print("Executing command %s on host %s" % (command, self.host))
         rc = self.lib.execute_command(command, return_rc=True)
         if rc[1] != 0:
             raise Exception('remote command failed [{0}] with exit code {1}.'
@@ -35,14 +35,14 @@ class RemoteHost:
 
     def copy_file(self, src, dest):
         if src is None:
-            print "src is None not copy anything to " + dest
+            print("src is None not copy anything to ", dest)
             return
 
         if os.path.exists(src) is False:
-            print "Src file " + src + " was not found"
+            print("Src file " + src + " was not found")
             return
 
-        print "Copying " + src + " to " + dest + " on " + self.host
+        print("Copying %s to %s on %s" % (src, dest, self.host))
         self.lib.put_file(src, dest)
 
     def kill_controller(self):
tools/fastbgp/play.py
index d82cbab0073211264b61fc82eb93cddfe0fc25da..b75aa2de4ea86a61d50ce7c732058becb87b8d99 100755 (executable)
@@ -186,7 +186,7 @@ def parse_arguments():
     parser.add_argument("--skipattr", default=False, action="store_true", help=str_help)
     arguments = parser.parse_args()
     if arguments.multiplicity < 1:
-        print "Multiplicity", arguments.multiplicity, "is not positive."
+        print("Multiplicity", arguments.multiplicity, "is not positive.")
         raise SystemExit(1)
     # TODO: Are sanity checks (such as asnumber>=0) required?
     return arguments
@@ -1857,7 +1857,6 @@ class WriteTracker(object):
             :return: true if no remaining data to send
         """
         # We assume there is a msg_out to send and socket is writable.
-        # print "going to send", repr(self.msg_out)
         self.timer.snapshot()
         bytes_sent = self.socket.send(self.msg_out)
         # Forget the part of message that was sent.
@@ -2153,7 +2152,7 @@ def threaded_job(arguments):
         for t in thread_args:
             thread.start_new_thread(job, (t, rpcqueue, storage))
     except Exception:
-        print "Error: unable to start thread."
+        print("Error: unable to start thread.")
         raise SystemExit(2)
 
     if arguments.usepeerip:
tools/mdsal_benchmark/dsbenchmark.py
index 15d6607f3903b8ca99cbd1451d1bdf7bb28a3ef6..5e7b553c0c8a44f6d0c9d55b9c7ed202e5dfb10d 100755 (executable)
@@ -65,7 +65,7 @@ def send_clear_request():
     url = BASE_URL + "operations/dsbenchmark:cleanup-store"
 
     r = requests.post(url, stream=False, auth=('admin', 'admin'))
-    print r.status_code
+    print(r.status_code)
 
 
 def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
@@ -101,7 +101,7 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)['output'].items())
     else:
-        print 'Error %s, %s' % (r.status_code, r.content)
+        print('Error %s, %s' % (r.status_code, r.content))
     return result
 
 
@@ -115,8 +115,8 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print '%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % \
-          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError'])
+    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
+          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
 
 
 def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
@@ -138,8 +138,8 @@ def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, ou
     total_build_time = 0.0
     total_exec_time = 0.0
 
-    print "Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,
-    print "Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx
+    print("Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,)
+    print("Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx)
     for idx in range(warmup_runs):
         res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
         print_results('WARMUP', idx, res)
@@ -168,8 +168,8 @@ def store_result(values, tx_type, operation, data_fmt, datastore,
     :param value: The (measured) value
     :return: none
     """
-    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' +
-                str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
+    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/'
+                + str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
     values[plot_key] = value
 
 
@@ -228,35 +228,35 @@ if __name__ == "__main__":
     f = open('test.csv', 'wt')
     try:
         start_time = time.time()
-        print "Start time: %f " % start_time
+        print("Start time: %f " % (start_time))
 
         writer = csv.writer(f)
 
         # Determine the impact of transaction type, data format and data structure on performance.
         # Iterate over all transaction types, data formats, operation types, and different
         # list-of-lists layouts; always use a single operation in each transaction
-        print '\n#######################################'
-        print 'Tx type, data format & data structure'
-        print '#######################################'
+        print('\n#######################################')
+        print('Tx type, data format & data structure')
+        print('#######################################')
         for tx_type in TX_TYPES:
-            print '***************************************'
-            print 'Transaction Type: %s' % tx_type
-            print '***************************************'
+            print('***************************************')
+            print('Transaction Type: %s' % tx_type)
+            print('***************************************')
             writer.writerow((('%s:' % tx_type), '', ''))
 
             for fmt in DATA_FORMATS:
-                print '---------------------------------------'
-                print 'Data format: %s' % fmt
-                print '---------------------------------------'
+                print('---------------------------------------')
+                print('Data format: %s' % fmt)
+                print('---------------------------------------')
                 writer.writerow(('', ('%s:' % fmt), ''))
 
                 for datastore in DATASTORES:
                     print
-                    print 'Data store: %s' % datastore
+                    print('Data store: %s' % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print 'Operation: %s' % oper
+                        print('Operation: %s' % oper)
                         writer.writerow(('', '', '%s:' % oper))
 
                         for elem in INNER_ELEMENTS:
@@ -273,28 +273,28 @@ if __name__ == "__main__":
         # Determine the impact of number of writes per transaction on performance.
         # Iterate over all transaction types, data formats, operation types, and
         # operations-per-transaction; always use a list of lists where the inner list has one parameter
-        print '\n#######################################'
-        print 'Puts per tx'
-        print '#######################################'
+        print('\n#######################################')
+        print('Puts per tx')
+        print('#######################################')
         for tx_type in TX_TYPES:
-            print '***************************************'
-            print 'Transaction Type: %s' % tx_type
-            print '***************************************'
+            print('***************************************')
+            print('Transaction Type: %s' % tx_type)
+            print('***************************************')
             writer.writerow((('%s:' % tx_type), '', ''))
 
             for fmt in DATA_FORMATS:
-                print '---------------------------------------'
-                print 'Data format: %s' % fmt
-                print '---------------------------------------'
+                print('---------------------------------------')
+                print('Data format: %s' % fmt)
+                print('---------------------------------------')
                 writer.writerow(('', ('%s:' % fmt), ''))
 
                 for datastore in DATASTORES:
                     print
-                    print 'Data store: %s' % datastore
+                    print('Data store: %s' % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print 'Operation: %s' % oper
+                        print('Operation: %s' % oper)
                         writer.writerow(('', '', '%s:' % oper))
 
                         for wtx in OPS_PER_TX:
@@ -311,8 +311,8 @@ if __name__ == "__main__":
         write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)
 
         end_time = time.time()
-        print "End time: %f " % end_time
-        print "Total execution time: %f" % (end_time - start_time)
+        print("End time: %f " % (end_time))
+        print("Total execution time: %f" % ((end_time - start_time)))
 
     finally:
         f.close()
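
A behavioral footnote for this file: python2's trailing-comma form
(print x,) suppressed the newline so the next print continued on the
same line. The function form needs an explicit end keyword to keep
that behavior, as on the "Tx Type:" line above. A minimal sketch:

    print("Tx Type:", "simple-tx", end=' ')  # no newline, like py2's trailing comma
    print("Outer Elements:", 100)            # continues on the same line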
tools/mdsal_benchmark/ntfbenchmark.py
index 1fd649b692d1909b82b197fccdd7ed707b2a3f83..2537c649c1e5d682e9777294659100b3da91b05f 100755 (executable)
@@ -53,7 +53,7 @@ def send_test_request(producer_type, producers, listeners, payload_size, iterati
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)['output'].items())
     else:
-        print 'Error %s, %s' % (r.status_code, r.content)
+        print('Error %s, %s' % (r.status_code, r.content))
     return result
 
 
@@ -67,10 +67,10 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print '%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' % \
+    print('%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' %
           (run_type, idx,
            res[u'producer-ok'], res[u'producer-error'], res[u'listener-ok'], res[u'producer-rate'],
-           res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time'])
+           res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time']))
 
 
 def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations):
@@ -145,15 +145,15 @@ if __name__ == "__main__":
             for lis in args.listeners:
                 exec_time, prate, lrate = run_test(args.warm, args.run, args.ptype, prod, lis,
                                                    args.payload, args.iterations)
-                print 'Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate)
+                print('Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate))
                 lrate_row.append(lrate)
                 prate_row.append(prate)
 
             lrate_matrix.append(lrate_row)
             prate_matrix.append(prate_row)
 
-        print lrate_matrix
-        print prate_matrix
+        print(lrate_matrix)
+        print(prate_matrix)
 
         # writer.writerow((('%s:' % args.ptype), '', '', ''))
         # writer.writerow(('', exec_time, prate, lrate))
tools/mdsal_benchmark/rpcbenchmark.py
index 5e091fd02ef2edbca5193f3cc9934f4748f57447..9c32ae48d9531f84dc5d0019e83f8e8d25dd4ce8 100755 (executable)
@@ -53,7 +53,7 @@ def send_test_request(operation, clients, servers, payload_size, iterations):
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)['output'].items())
     else:
-        print 'Error %s, %s' % (r.status_code, r.content)
+        print('Error %s, %s' % (r.status_code, r.content))
     return result
 
 
@@ -67,9 +67,9 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print '%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' % \
+    print('%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' %
           (run_type, idx,
-           res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time'])
+           res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time']))
 
 
 def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size, iterations):
@@ -150,7 +150,7 @@ if __name__ == "__main__":
                     run_test(args.warm, args.run, args.operation, client, svr, args.payload, args.iterations)
                 rate_row.append(rate)
             rate_matrix.append(rate_row)
-        print rate_matrix
+        print(rate_matrix)
 
         writer.writerow(('RPC Rates:', ''))
         writer.writerows(rate_matrix)
index 90b9ca862b3bcf3f4a03e1ff5a4204078efe3f37..d3c10bc6e528a644a2883ea9997aa74c140a6c0a 100644 (file)
@@ -129,7 +129,7 @@ def main():
     def handle_sigint(received_signal, frame):  # This is a closure as it refers to the counter.
         """Upon SIGINT, print counter contents and exit gracefully."""
         signal.signal(signal.SIGINT, signal.SIG_DFL)
-        print sorted_repr(counter)
+        print(sorted_repr(counter))
         sys.exit(0)
 
     signal.signal(signal.SIGINT, handle_sigint)
index 4596277807bdff48a8b9264e9a2a4a9d8b0f7100..3655800410563cd904bf159e08163074b7fefc40 100644 (file)
@@ -171,10 +171,10 @@ while request_count > 0:
     if len(responses) > 0:
         result = responses.popleft()
         if result[0] is None:
-            print "ERROR|" + result[1] + "|"
+            print("ERROR|" + result[1] + "|")
             break
         runtime = "%5.3f|%5.3f|%5.3f" % result[1]
-        print "%03d|%s|%s|" % (result[0], runtime, result[2])
+        print("%03d|%s|%s|" % ((result[0], runtime, result[2])))
         request_count -= 1
         continue
     time.sleep(args.refresh)
index 63c1a8e7f46d912e6514b6481ff4ac50639ce509..fb7e7587055ff35b8f0b2cc461522533cf0bdc72 100755 (executable)
@@ -95,10 +95,10 @@ class MappingRPCBlaster(object):
         self.start_rloc = netaddr.IPAddress(start_rloc)
         self.nmappings = nmappings
         if v == "Li" or v == "li":
-            print "Using the Lithium RPC URL"
+            print("Using the Lithium RPC URL")
             rpc_url = self.RPC_URL_LI
         else:
-            print "Using the Beryllium and later RPC URL"
+            print("Using the Beryllium and later RPC URL")
             rpc_url = self.RPC_URL_BE
 
         self.post_url_template = 'http://' + self.host + ':' \
@@ -205,4 +205,4 @@ if __name__ == "__main__":
     elif in_args.mode == "get":
         mapping_rpc_blaster.get_n_mappings()
     else:
-        print "Unsupported mode:", in_args.mode
+        print("Unsupported mode:", in_args.mode)
index 1dd17f273e2baea308766e91428d2fc99137e934..188f6437c57822d58f8c40e453f1f5edb9f96c21 100755 (executable)
@@ -56,10 +56,10 @@ if __name__ == "__main__":
         sts = cleanup_config_fl(in_args.host, in_args.port)
         exp = 204
     else:
-        print 'Unknown controller type'
+        print('Unknown controller type')
         sys.exit(-1)
 
     if sts != exp:
-        print 'Failed to delete nodes in the config space, code %d' % sts
+        print('Failed to delete nodes in the config space, code %d' % sts)
     else:
-        print 'Nodes in config space deleted.'
+        print('Nodes in config space deleted.')
index 12ee2e82fae36da3b0b8d8a6ccaa0c4535408471..154a89f3ed410a67a6c9bff223b6a7c3d4080d26 100644 (file)
@@ -15,13 +15,13 @@ for line in log.splitlines():
     res = pat_rate.search(line)
     if res is not None:
         rate.append(res.groups('rate1')[0])
-print rate
+print(rate)
 
 for line in log.splitlines():
     res = pat_time.search(line)
     if res is not None:
         time.append(res.groups('time1')[0])
-print time
+print(time)
 
 text_file = open("rates.csv", "w")
 text_file.write('Add,Delete\n')
index b0ddcda3def3a72c7144e4a6f7774eecc55a5812..57bb31ddaff9ae3cefe8eacb326beb98f00a51a8 100755 (executable)
@@ -43,21 +43,21 @@ def wait_for_stats(crawler, exp_found, timeout, delay):
     :return: None
     """
     total_delay = 0
-    print 'Waiting for stats to catch up:'
+    print('Waiting for stats to catch up:')
 
     with Timer() as t:
         while True:
             crawler.crawl_inventory()
-            print '   %d, %d' % (crawler.reported_flows, crawler.found_flows)
+            print('   %d, %d' % (crawler.reported_flows, crawler.found_flows))
             if crawler.found_flows == exp_found or total_delay > timeout:
                 break
             total_delay += delay
             time.sleep(delay)
 
     if total_delay < timeout:
-        print 'Stats collected in %d seconds.' % t.secs
+        print('Stats collected in %d seconds.' % t.secs)
     else:
-        print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay
+        print('Stats collection did not finish in %d seconds. Aborting...' % total_delay)
 
 
 if __name__ == "__main__":
@@ -131,16 +131,16 @@ if __name__ == "__main__":
     reported = ic.reported_flows
     found = ic.found_flows
 
-    print 'Baseline:'
-    print '   Reported flows: %d' % reported
-    print '   Found flows:    %d' % found
+    print('Baseline:')
+    print('   Reported flows: %d' % reported)
+    print('   Found flows:    %d' % found)
 
     # Run through <CYCLES> add cycles, where <THREADS> threads are started in
     # each cycle and <FLOWS> flows are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %d' % fct.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+    print('\n*** Total flows added: %d' % fct.get_ok_flows())
+    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
 
     # Wait for stats to catch up
     wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
@@ -149,17 +149,18 @@
     # in each cycle and <FLOWS> flows previously added in an add cycle are
     # deleted in each thread
     if in_args.bulk_delete:
-        print '\nDeleting all flows in bulk:'
+        print('\nDeleting all flows in bulk:')
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
         if sts != 200:
-            print '   Failed to delete flows, code %d' % sts
+            print('   Failed to delete flows, code %d' % sts)
         else:
-            print '   All flows deleted.'
+            print('   All flows deleted.')
     else:
-        print '\nDeleting flows one by one\n   ',
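+        # end=' ' replaces the Python 2 trailing comma: suppress the newline, emit a space.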
+        print('\nDeleting flows one by one\n   ', end=' ')
         fct.delete_blaster()
-        print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
+        print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
 
     # Wait for stats to catch up back to baseline
     wait_for_stats(ic, found, in_args.timeout, in_args.delay)
index 6fba1216998b1bf139397a185bd6267d7bcb7efe..a07748150cbe768f743b9e537069725ef9f71f87 100755 (executable)
@@ -277,7 +277,6 @@ class FlowConfigBlaster(object):
         hosts = self.host.split(",")
         host = hosts[flow_count % len(hosts)]
         flow_url = self.assemble_post_url(host, node)
-        # print flow_url
 
         if not self.auth:
             r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
@@ -306,7 +305,6 @@ class FlowConfigBlaster(object):
         fmod = dict(self.flow_mode_template)
         fmod['flow'] = flow_list
         flow_data = json.dumps(fmod)
-        # print flow_data
         return flow_data
 
     def add_flows(self, start_flow_id, tid):
@@ -330,7 +328,7 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+            print('    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))
 
         nflows = 0
         nb_actions = []
@@ -359,13 +357,14 @@ class FlowConfigBlaster(object):
         ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
 
         with self.print_lock:
-            print '\n    Thread %d results (ADD): ' % tid
-            print '        Elapsed time: %.2fs,' % t.secs
-            print '        Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
-            print '        Flows/s:    %.2f OK, %.2f Total' % (ok_fps, total_fps)
-            print '        Stats ({Requests}, {Flows}): ',
-            print rqst_stats,
-            print flow_stats
+            print('\n    Thread %d results (ADD): ' % tid)
+            print('        Elapsed time: %.2fs,' % t.secs)
+            print('        Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+            print('        Flows/s:    %.2f OK, %.2f Total' % (ok_fps, total_fps))
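+            # end=' ' keeps the two stats dicts on the same line as their label.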
+            print('        Stats ({Requests}, {Flows}): ', end=' ')
+            print(rqst_stats, end=' ')
+            print(flow_stats)
             self.threads_done += 1
 
         s.close()
@@ -389,7 +387,6 @@ class FlowConfigBlaster(object):
         hosts = self.host.split(",")
         host = hosts[flow_count % len(hosts)]
         flow_url = self.del_url_template % (host, node, flow_id)
-        # print flow_url
 
         if not self.auth:
             r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
@@ -415,7 +412,7 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+            print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))
 
         with Timer() as t:
             for flow in range(self.nflows):
@@ -429,12 +426,12 @@ class FlowConfigBlaster(object):
         ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
 
         with self.print_lock:
-            print '\n    Thread %d results (DELETE): ' % tid
-            print '        Elapsed time: %.2fs,' % t.secs
-            print '        Requests/s:  %.2f OK,  %.2f Total' % (ok_rps, total_rps)
-            print '        Flows/s:     %.2f OK,  %.2f Total' % (ok_fps, total_fps)
-            print '        Stats ({Requests})',
-            print rqst_stats
+            print('\n    Thread %d results (DELETE): ' % tid)
+            print('        Elapsed time: %.2fs,' % t.secs)
+            print('        Requests/s:  %.2f OK,  %.2f Total' % (ok_rps, total_rps))
+            print('        Flows/s:     %.2f OK,  %.2f Total' % (ok_fps, total_fps))
+            print('        Stats ({Requests})', end=' ')
+            print(rqst_stats)
             self.threads_done += 1
 
         s.close()
@@ -457,7 +454,7 @@ class FlowConfigBlaster(object):
         for c in range(self.ncycles):
             self.stats = self.FcbStats()
             with self.print_lock:
-                print '\nCycle %d:' % c
+                print('\nCycle %d:' % c)
 
             threads = []
             for i in range(self.nthreads):
@@ -471,20 +468,20 @@ class FlowConfigBlaster(object):
                     thread.join()
 
             with self.print_lock:
-                print '\n*** Test summary:'
-                print '    Elapsed time:    %.2fs' % t.secs
-                print '    Peak requests/s: %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
-                print '    Peak flows/s:    %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
-                print '    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+                print('\n*** Test summary:')
+                print('    Elapsed time:    %.2fs' % t.secs)
+                print('    Peak requests/s: %.2f OK, %.2f Total' % (
+                    self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
+                print('    Peak flows/s:    %.2f OK, %.2f Total' % (
+                    self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
+                print('    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
                     self.stats.get_ok_rqsts() / t.secs,
                     self.stats.get_total_rqsts() / t.secs,
-                    (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
-                print '    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)' % (
-                    self.stats.get_ok_flows() / t.secs,
-                    self.stats.get_total_flows() / t.secs,
-                    (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+                    (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
+                print('    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+                      self.stats.get_ok_flows() / t.secs,
+                      self.stats.get_total_flows() / t.secs,
+                      (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate()))
 
                 self.total_ok_flows += self.stats.get_ok_flows()
                 self.total_ok_rqsts += self.stats.get_ok_rqsts()
@@ -518,10 +515,10 @@ def get_json_from_file(filename):
             keys = ft['flow'][0].keys()
             if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
                 if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
-                    print 'File "%s" ok to use as flow template' % filename
+                    print('File "%s" ok to use as flow template' % filename)
                     return ft
         except ValueError:
-            print 'JSON parsing of file %s failed' % filename
+            print('JSON parsing of file %s failed' % filename)
             pass
 
     return None
@@ -648,16 +645,16 @@ if __name__ == "__main__":
     # <flows> are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+    print('\n*** Total flows added: %s' % fct.get_ok_flows())
+    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
-        print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
+        print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
+        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
index 1fec13191a50e53515096559aa69c037409dc01d..f24d5ab06239df2ae0839208a49ca905e48609c0 100755 (executable)
@@ -78,7 +78,6 @@ class FlowConfigBulkBlaster(flow_config_blaster.FlowConfigBlaster):
             json_input = {'input': {'bulk-flow-ds-item': flow_list}}
 
         flow_data = json.dumps(json_input)
-        # print flow_data
         return flow_data
 
 
@@ -111,16 +110,16 @@ if __name__ == "__main__":
     # <flows> are added from each thread
     fcbb.add_blaster()
 
-    print '\n*** Total flows added: %s' % fcbb.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fcbb.get_ok_rqsts()
+    print('\n*** Total flows added: %s' % fcbb.get_ok_flows())
+    print('    HTTP[OK] results:  %d\n' % fcbb.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fcbb.delete_blaster()
-        print '\n*** Total flows deleted: %s' % fcbb.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fcbb.get_ok_rqsts()
+        print('\n*** Total flows deleted: %s' % fcbb.get_ok_flows())
+        print('    HTTP[OK] results:    %d\n' % fcbb.get_ok_rqsts())
index b54cf5cce5a85ca5e5f5c29396940461ee767d05..cbed315ecae33a37879b1e22df49c593610f524f 100755 (executable)
@@ -103,9 +103,9 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
         clear_url = 'http://' + self.host + ":" + self.port + '/wm/staticflowpusher/clear/all/json'
         r = requests.get(clear_url)
         if r.status_code == 200:
-            print "All flows cleared before the test"
+            print("All flows cleared before the test")
         else:
-            print "Failed to clear flows from the controller, your results may vary"
+            print("Failed to clear flows from the controller, your results may vary")
 
 
 if __name__ == "__main__":
@@ -147,16 +147,16 @@ if __name__ == "__main__":
     # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %s' % fct.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+    print('\n*** Total flows added: %s' % fct.get_ok_flows())
+    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
     # deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
-        print '\n*** Total flows deleted: %s' % fct.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
+        print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
+        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
index 5bad661eb175ac84e5092bae4194a872071aea86..9dacaec456aca698fe1f516717bfd393ca8a6159 100755 (executable)
@@ -113,9 +113,9 @@ if __name__ == "__main__":
     reported = ic.reported_flows
     found = ic.found_flows
 
-    print 'Baseline:'
-    print '   Reported nodes: %d' % reported
-    print '   Found nodes:    %d' % found
+    print('Baseline:')
+    print('   Reported nodes: %d' % reported)
+    print('   Found nodes:    %d' % found)
 
     stats = []
     stats.append((time.time(), ic.nodes, ic.reported_flows, ic.found_flows))
@@ -123,33 +123,33 @@ if __name__ == "__main__":
     # each cycle and <FLOWS> flows are added from each thread
     fct.add_blaster()
 
-    print '\n*** Total flows added: %d' % fct.get_ok_flows()
-    print '    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts()
+    print('\n*** Total flows added: %d' % fct.get_ok_flows())
+    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
 
     # monitor stats and save results in the list
     for stat_item in monitor_stats(ic, in_args.config_monitor, in_args.monitor_period):
-        print stat_item
+        print(stat_item)
         stats.append(stat_item)
 
     # Run through <CYCLES> delete cycles, where <THREADS> threads  are started
     # in each cycle and <FLOWS> flows previously added in an add cycle are
     # deleted in each thread
     if in_args.bulk_delete:
-        print '\nDeleting all flows in bulk:'
+        print('\nDeleting all flows in bulk:')
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
         if sts != 200:
-            print '   Failed to delete flows, code %d' % sts
+            print('   Failed to delete flows, code %d' % sts)
         else:
-            print '   All flows deleted.'
+            print('   All flows deleted.')
     else:
-        print '\nDeleting flows one by one\n   ',
+        print('\nDeleting flows one by one\n   ', end=' ')
         fct.delete_blaster()
-        print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
-        print '    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts()
+        print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
 
     # monitor stats and append to the list
     for stat_item in monitor_stats(ic, in_args.deconfig_monitor, in_args.monitor_period):
-        print stat_item
+        print(stat_item)
         stats.append(stat_item)
 
     # if requested, write collected data into the file
index f24c0f724d2711eca790a6a08ceb892e671cb8ae..65cb5d2da81b1633edff4189600b5b5b9a5ee67f 100755 (executable)
@@ -34,7 +34,7 @@ class InventoryCrawler(object):
         """
         self.found_flows += len(flows)
         if self.plevel > 1:
-            print '             Flows found: %d\n' % len(flows)
+            print('             Flows found: %d\n' % len(flows))
             if self.plevel > 2:
                 for f in flows:
                     s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
@@ -45,8 +45,8 @@ class InventoryCrawler(object):
                     s = s.rstrip('}')
                     s = s.replace('\n', '\n            ')
                     s = s.lstrip('\n')
-                    print "             Flow %s:" % f['id']
-                    print s
+                    print("             Flow %s:" % (f['id']))
+                    print(s)
 
     def crawl_table(self, table):
         """
@@ -60,14 +60,14 @@ class InventoryCrawler(object):
             if active_flows > 0:
                 self.reported_flows += active_flows
                 if self.plevel > 1:
-                    print '        Table %s:' % table['id']
+                    print('        Table %s:' % table['id'])
                     s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
                     s = s.replace('{\n', '')
                     s = s.replace('}', '')
-                    print s
+                    print(s)
         except KeyError:
             if self.plevel > 1:
-                print "        Stats for Table '%s' not available." % table['id']
+                print("        Stats for Table '%s' not available." % (table['id']))
             self.table_stats_unavailable += 1
             pass
 
@@ -85,14 +85,14 @@ class InventoryCrawler(object):
         self.nodes += 1
 
         if self.plevel > 1:
-            print "\nNode '%s':" % (node['id'])
+            print("\nNode '%s':" % ((node['id'])))
         elif self.plevel > 0:
-            print "%s" % (node['id'])
+            print("%s" % ((node['id'])))
 
         try:
             tables = node['flow-node-inventory:table']
             if self.plevel > 1:
-                print '    Tables: %d' % len(tables)
+                print('    Tables: %d' % len(tables))
 
             for t in tables:
                 self.crawl_table(t)
@@ -102,7 +102,7 @@ class InventoryCrawler(object):
 
         except KeyError:
             if self.plevel > 1:
-                print '    Data for tables not available.'
+                print('    Data for tables not available.')
 
     def crawl_inventory(self):
         """
@@ -134,12 +134,12 @@ class InventoryCrawler(object):
                     try:
                         self.crawl_node(sinv[n])
                     except:
-                        print 'Can not crawl %s' % sinv[n]['id']
+                        print('Can not crawl %s' % sinv[n]['id'])
 
             except KeyError:
-                print 'Could not retrieve inventory, response not in JSON format'
+                print('Could not retrieve inventory, response not in JSON format')
         else:
-            print 'Could not retrieve inventory, HTTP error %d' % r.status_code
+            print('Could not retrieve inventory, HTTP error %d' % r.status_code)
 
         s.close()
 
@@ -170,16 +170,16 @@ if __name__ == "__main__":
     ic = InventoryCrawler(in_args.host, in_args.port, in_args.plevel, in_args.datastore, in_args.auth,
                           in_args.debug)
 
-    print "Crawling '%s'" % ic.url
+    print("Crawling '%s'" % (ic.url))
     ic.crawl_inventory()
 
-    print '\nTotals:'
-    print '    Nodes:          %d' % ic.nodes
-    print '    Reported flows: %d' % ic.reported_flows
-    print '    Found flows:    %d' % ic.found_flows
+    print('\nTotals:')
+    print('    Nodes:          %d' % ic.nodes)
+    print('    Reported flows: %d' % ic.reported_flows)
+    print('    Found flows:    %d' % ic.found_flows)
 
     if in_args.debug:
         n_missing = len(ic.table_stats_fails)
         if n_missing > 0:
-            print '\nMissing table stats (%d nodes):' % n_missing
-            print "%s\n" % ", ".join([x for x in ic.table_stats_fails])
+            print('\nMissing table stats (%d nodes):' % n_missing)
+            print("%s\n" % (", ".join([x for x in ic.table_stats_fails])))
index 48d279f2ad9c832adc7ec0b662aeb8721ae97a46..716c644c6ac35263012dd65836b53aab92cf2798 100644 (file)
@@ -71,7 +71,7 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
     results = {}
 
     with print_lock:
-        print 'Thread %d: Getting %s' % (tnum, url)
+        print('Thread %d: Getting %s' % (tnum, url))
 
     s = requests.Session()
     with Timer() as t:
@@ -95,12 +95,12 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
     total_mb_rate.increment(mrate)
 
     with print_lock:
-        print '\nThread %d: ' % tnum
-        print '    Elapsed time: %.2f,' % t.secs
-        print '    Requests: %d, Requests/sec: %.2f' % (total, rate)
-        print '    Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate)
-        print '    Results: ',
-        print results
+        print('\nThread %d: ' % tnum)
+        print('    Elapsed time: %.2f,' % t.secs)
+        print('    Requests: %d, Requests/sec: %.2f' % (total, rate))
+        print('    Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate))
+        print('    Results: ', end=' ')
+        print(results)
 
     with cond:
         cond.notifyAll()
@@ -139,10 +139,10 @@ if __name__ == "__main__":
             cond.wait()
             finished = finished + 1
 
-    print '\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
-                                                                      total_req_rate.value)
-    print 'Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
-                                                                       total_mb_rate.value)
+    print('\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
+                                                                      total_req_rate.value))
+    print('Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
+                                                                       total_mb_rate.value))
 
 #    get_inventory(url, getheaders, int(in_args.requests))
 
index 87a19ebc85dc3c53907be59c332e1406866a180a..c1d48d2476985664eff0a78295456422a7e735f5 100755 (executable)
@@ -67,7 +67,7 @@ def read(hosts, port, auth, datastore, print_lock, cycles, results_queue):
         stats[r.status_code] = stats.get(r.status_code, 0) + 1
 
     with print_lock:
-        print '   ', threading.current_thread().name, 'results:', stats
+        print('   %s results: %s' % (threading.current_thread().name, stats))
 
     results_queue.put(stats)
 
@@ -118,6 +118,6 @@ if __name__ == "__main__":
     # Aggregate the results
     stats = functools.reduce(operator.add, map(collections.Counter, results.queue))
 
-    print '\n*** Test summary:'
-    print '    Elapsed time:    %.2fs' % t.secs
-    print '    HTTP[OK] results:  %d\n' % stats[200]
+    print('\n*** Test summary:')
+    print('    Elapsed time:    %.2fs' % t.secs)
+    print('    HTTP[OK] results:  %d\n' % stats[200])
index 214e8aeaec357e30c95fe8252ed01b139989c255..0298c0a841cfcc2f978d7ee132e43638d7cca541 100644 (file)
@@ -224,7 +224,7 @@ def main(*argv):
     parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
 
     in_args = parser.parse_args(*argv)
-    print in_args
+    print(in_args)
 
     # get device ids
     base_dev_ids = get_device_ids(controller=in_args.host)
@@ -236,9 +236,9 @@ def main(*argv):
 
     base_num_flows = len(base_flow_ids)
 
-    print "BASELINE:"
-    print "    devices:", len(base_dev_ids)
-    print "    flows  :", base_num_flows
+    print("BASELINE:")
+    print("    devices:", len(base_dev_ids))
+    print("    flows  :", base_num_flows)
 
     # lets fill the queue for workers
     nflows = 0
@@ -287,25 +287,25 @@ def main(*argv):
                 else:
                     result[k] += v
 
-    print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result
+    print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result)
     add_details = {"duration": tmr.secs, "flows": len(flow_details)}
 
     # lets print some stats
-    print "\n\nStats monitoring ..."
+    print("\n\nStats monitoring ...")
     rounds = 200
     with Timer() as t:
         for i in range(rounds):
             reported_flows = len(get_flow_ids(controller=in_args.host))
             expected_flows = base_num_flows + in_args.flows
-            print "Reported Flows: %d/%d" % (reported_flows, expected_flows)
+            print("Reported Flows: %d/%d" % ((reported_flows, expected_flows)))
             if reported_flows >= expected_flows:
                 break
             time.sleep(1)
 
     if i < rounds:
-        print "... monitoring finished in +%d seconds\n\n" % t.secs
+        print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
 
     if in_args.no_delete:
         return
@@ -313,7 +313,7 @@ def main(*argv):
     # sleep in between
     time.sleep(in_args.timeout)
 
-    print "Flows to be removed: %d" % len(flow_details)
+    print("Flows to be removed: %d" % (len(flow_details)))
     # lets fill the queue for workers
     sendqueue = Queue.Queue()
     for fld in flow_details:
@@ -356,37 +356,30 @@ def main(*argv):
                     else:
                         result[k] += v
 
-    print "Removed", len(flow_details), "flows in", tmr.secs, "seconds", result
+    print("Removed", len(flow_details), "flows in", tmr.secs, "seconds", result)
     del_details = {"duration": tmr.secs, "flows": len(flow_details)}
 
-#    # lets print some stats
-#    print "\n\nSome stats monitoring ...."
-#    for i in range(100):
-#        print get_flow_simple_stats(controller=in_args.host)
-#        time.sleep(5)
-#    print "... monitoring finished\n\n"
-    # lets print some stats
-    print "\n\nStats monitoring ..."
+    print("\n\nStats monitoring ...")
     rounds = 200
     with Timer() as t:
         for i in range(rounds):
             reported_flows = len(get_flow_ids(controller=in_args.host))
             expected_flows = base_num_flows
-            print "Reported Flows: %d/%d" % (reported_flows, expected_flows)
+            print("Reported Flows: %d/%d" % ((reported_flows, expected_flows)))
             if reported_flows <= expected_flows:
                 break
             time.sleep(1)
 
     if i < rounds:
-        print "... monitoring finished in +%d seconds\n\n" % t.secs
+        print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
 
     if in_args.outfile != "":
         addrate = add_details['flows'] / add_details['duration']
         delrate = del_details['flows'] / del_details['duration']
-        print "addrate", addrate
-        print "delrate", delrate
+        print("addrate", addrate)
+        print("delrate", delrate)
 
         with open(in_args.outfile, "wt") as fd:
             fd.write("AddRate,DeleteRate\n")
index 86d860f5384a8ea4b0235de5afe354f5078aef68..06654b8ff6da075cf41f4711a0b61c2c23746f1d 100644 (file)
@@ -217,14 +217,10 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
     if rsp.status_code != 200:
         return
     flows = json.loads(rsp.content)['flows']
-    # print "Flows", flows
-    # print "Details", flow_details
     for dev_id, ip in flow_details:
-        # print "looking for details", dev_id, ip
         for f in flows:
             # lets identify if it is our flow
             if f["treatment"]["instructions"][0]["type"] != "DROP":
-                # print "NOT DROP"
                 continue
             if f["deviceId"] == dev_id:
                 if "ip" in f["selector"]["criteria"][0]:
@@ -233,9 +229,7 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
                     item_idx = 1
                 else:
                     continue
-                # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
                 if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
-                    # print dev_id, ip, f
                     yield dev_id, f["id"]
                     break
 
@@ -246,13 +240,10 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
     if rsp.status_code != 200:
         return
     flows = json.loads(rsp.content)['flows']
-    # print "Flows", flows
-    # print "Details", flow_details
 
     for f in flows:
         # lets identify if it is our flow
         if f["treatment"]["instructions"][0]["type"] != "NOACTION":
-            # print "NOT DROP"
             continue
         if "ip" in f["selector"]["criteria"][0]:
             item_idx = 0
@@ -260,10 +251,8 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
             item_idx = 1
         else:
             continue
-            # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
         ipstr = f["selector"]["criteria"][item_idx]["ip"]
         if '10.' in ipstr and '/32' in ipstr:
-            # print dev_id, ip, f
             yield (f["deviceId"], f["id"])
 
 
@@ -278,7 +267,7 @@ def main(*argv):
                         help='Port on which onos\'s RESTCONF is listening (default is 8181)')
 
     in_args = parser.parse_args(*argv)
-    print in_args
+    print(in_args)
 
     # get device ids
     base_dev_ids = get_device_ids(controller=in_args.host)
@@ -288,13 +277,13 @@ def main(*argv):
     # prepare func
     preparefnc = _prepare_post  # noqa  # FIXME: This script seems to be unfinished!
 
-    print "BASELINE:"
-    print "    devices:", len(base_dev_ids)
-    print "    flows  :", len(base_flow_ids)
+    print("BASELINE:")
+    print("    devices:", len(base_dev_ids))
+    print("    flows  :", len(base_flow_ids))
 
     # lets print some stats
-    print "\n\nSome stats monitoring ...."
-    print get_flow_simple_stats(controller=in_args.host)
+    print("\n\nSome stats monitoring ....")
+    print(get_flow_simple_stats(controller=in_args.host))
 
 
 if __name__ == "__main__":
index 0e00655dfef3b305f635b2f8c8afb21cee784716..c9e0d3f7b1cddbca0572c17684553a9550a5a806 100644 (file)
@@ -231,14 +231,10 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
     if rsp.status_code != 200:
         return
     flows = json.loads(rsp.content)['flows']
-    # print "Flows", flows
-    # print "Details", flow_details
     for dev_id, ip in flow_details:
-        # print "looking for details", dev_id, ip
         for f in flows:
             # lets identify if it is our flow
             if f["treatment"]["instructions"][0]["type"] != "DROP":
-                # print "NOT DROP"
                 continue
             if f["deviceId"] == dev_id:
                 if "ip" in f["selector"]["criteria"][0]:
@@ -247,9 +243,7 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
                     item_idx = 1
                 else:
                     continue
-                # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
                 if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
-                    # print dev_id, ip, f
                     yield dev_id, f["id"]
                     break
 
@@ -260,13 +254,10 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
     if rsp.status_code != 200:
         return
     flows = json.loads(rsp.content)['flows']
-    # print "Flows", flows
-    # print "Details", flow_details
 
     for f in flows:
         # lets identify if it is our flow
         if f["treatment"]["instructions"][0]["type"] != "NOACTION":
-            # print "NOT DROP"
             continue
         if "ip" in f["selector"]["criteria"][0]:
             item_idx = 0
@@ -274,10 +265,8 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
             item_idx = 1
         else:
             continue
-            # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip))
         ipstr = f["selector"]["criteria"][item_idx]["ip"]
         if '10.' in ipstr and '/32' in ipstr:
-            # print dev_id, ip, f
             yield (f["deviceId"], f["id"])
 
 
@@ -307,7 +296,7 @@ def main(*argv):
     parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
 
     in_args = parser.parse_args(*argv)
-    print in_args
+    print(in_args)
 
     # get device ids
     base_dev_ids = get_device_ids(controller=in_args.host)
@@ -319,9 +308,9 @@ def main(*argv):
 
     base_num_flows = len(base_flow_ids)
 
-    print "BASELINE:"
-    print "    devices:", len(base_dev_ids)
-    print "    flows  :", base_num_flows
+    print("BASELINE:")
+    print("    devices:", len(base_dev_ids))
+    print("    flows  :", base_num_flows)
 
     # lets fill the queue for workers
     nflows = 0
@@ -369,16 +358,16 @@ def main(*argv):
                 else:
                     result[k] += v
 
-    print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result
+    print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result)
     add_details = {"duration": tmr.secs, "flows": len(flow_details)}
 
     # lets print some stats
-    print "\n\nStats monitoring ..."
+    print("\n\nStats monitoring ...")
     rounds = 200
     with Timer() as t:
         for i in range(rounds):
             flow_stats = get_flow_simple_stats(controller=in_args.host)
-            print flow_stats
+            print(flow_stats)
             try:
                 pending_adds = int(flow_stats[u'PENDING_ADD'])  # noqa  # FIXME: Print this somewhere.
             except KeyError:
@@ -386,9 +375,9 @@ def main(*argv):
             time.sleep(1)
 
     if i < rounds:
-        print "... monitoring finished in +%d seconds\n\n" % t.secs
+        print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
 
     if in_args.no_delete:
         return
@@ -402,7 +391,7 @@ def main(*argv):
     # for a in get_flow_device_pairs(controller=in_args.host, flow_details=flow_details):
     for a in get_flow_to_remove(controller=in_args.host):
         flows_remove_details.append(a)
-    print "Flows to be removed: ", len(flows_remove_details)
+    print("Flows to be removed: ", len(flows_remove_details))
 
     # lets fill the queue for workers
     nflows = 0
@@ -448,22 +437,15 @@ def main(*argv):
                 else:
                     result[k] += v
 
-    print "Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result
+    print("Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result)
     del_details = {"duration": tmr.secs, "flows": len(flows_remove_details)}
 
-#    # lets print some stats
-#    print "\n\nSome stats monitoring ...."
-#    for i in range(100):
-#        print get_flow_simple_stats(controller=in_args.host)
-#        time.sleep(5)
-#    print "... monitoring finished\n\n"
-    # lets print some stats
-    print "\n\nStats monitoring ..."
+    print("\n\nStats monitoring ...")
     rounds = 200
     with Timer() as t:
         for i in range(rounds):
             flow_stats = get_flow_simple_stats(controller=in_args.host)
-            print flow_stats
+            print(flow_stats)
             try:
                 pending_rems = int(flow_stats[u'PENDING_REMOVE'])  # noqa  # FIXME: Print this somewhere.
             except KeyError:
@@ -471,15 +453,15 @@ def main(*argv):
             time.sleep(1)
 
     if i < rounds:
-        print "... monitoring finished in +%d seconds\n\n" % t.secs
+        print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs)
+        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
 
     if in_args.outfile != "":
         addrate = add_details['flows'] / add_details['duration']
         delrate = del_details['flows'] / del_details['duration']
-        print "addrate", addrate
-        print "delrate", delrate
+        print("addrate", addrate)
+        print("delrate", delrate)
 
         with open(in_args.outfile, "wt") as fd:
             fd.write("AddRate,DeleteRate\n")
index 8d503a30e444cb228027ff68e68a8681eb9e79eb..bde0aa23ed9f29f304500fabe03f7d32bf35d2cd 100755 (executable)
@@ -14,4 +14,4 @@ if __name__ == "__main__":
     data = sys.stdin.readlines()
     payload = json.loads(data.pop(0))
     s = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
-    print '%s\n\n' % s
+    print('%s\n\n' % s)
index e6fcf50879e451fe40d973f2bd125c854f482d0f..1b2a28025cbffddd15d4c5e2f1fdc2d2079693b4 100755 (executable)
@@ -104,7 +104,7 @@ class ShardPerformanceTester(object):
         s = requests.Session()
 
         with self.print_lock:
-            print '    Thread %d: Performing %d requests' % (tid, self.requests)
+            print('    Thread %d: Performing %d requests' % (tid, self.requests))
 
         with Timer() as t:
             for r in range(self.requests):
@@ -118,11 +118,11 @@ class ShardPerformanceTester(object):
         total_rate = sum(res.values()) / t.secs
 
         with self.print_lock:
-            print 'Thread %d done:' % tid
-            print '    Time: %.2f,' % t.secs
-            print '    Success rate:  %.2f, Total rate: %.2f' % (ok_rate, total_rate)
-            print '    Per-thread stats: ',
-            print res
+            print('Thread %d done:' % tid)
+            print('    Time: %.2f,' % t.secs)
+            print('    Success rate:  %.2f, Total rate: %.2f' % (ok_rate, total_rate))
+            print('    Per-thread stats: ', end=' ')
+            print(res)
             self.threads_done += 1
             self.total_rate += total_rate
 
@@ -160,17 +160,18 @@ class ShardPerformanceTester(object):
                     self.cond.wait()
 
         # Print summary results. Each worker prints its owns results too.
-        print '\nSummary Results:'
-        print '    Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs)
-        print '    Requests/sec (measured):  %.2f' % ((self.threads * self.requests) / t.secs)
-        print '    Time: %.2f' % t.secs
+        print('\nSummary Results:')
+        print('    Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs))
+        print('    Requests/sec (measured):  %.2f' % ((self.threads * self.requests) / t.secs))
+        print('    Time: %.2f' % t.secs)
         self.threads_done = 0
 
         if self.plevel > 0:
-            print '    Per URL Counts: ',
+            print('    Per URL Counts: ', end=' ')
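+            # The per-URL request counters below all print on a single line.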
             for i in range(len(urls)):
-                print '%d' % self.url_counters[i].value,
-            print '\n'
+                print('%d' % self.url_counters[i].value, end=' ')
+            print('\n')
 
 
 class TestUrlGenerator(object):
@@ -199,7 +199,7 @@ class TestUrlGenerator(object):
         :param data: Bulk resource data (JSON) from which to generate the URLs
         :return: List of generated Resources
         """
-        print "Abstract class '%s' should never be used standalone" % self.__class__.__name__
+        print("Abstract class '%s' should never be used standalone" % (self.__class__.__name__))
         return []
 
     def generate(self):
@@ -218,12 +218,12 @@ class TestUrlGenerator(object):
             r = requests.get(t_url, headers=headers, stream=False, auth=('admin', 'admin'))
 
         if r.status_code != 200:
-            print "Failed to get HTTP response from '%s', code %d" % (t_url, r.status_code)
+            print("Failed to get HTTP response from '%s', code %d" % ((t_url, r.status_code)))
         else:
             try:
                 r_url = self.url_generator(json.loads(r.content))
             except:
-                print "Failed to get json from '%s'. Please make sure you are connected to mininet." % r_url
+                print("Failed to get json from '%s'. Please make sure you are connected to mininet." % (r_url))
 
         return r_url
 
@@ -251,7 +251,7 @@ class TopoUrlGenerator(TestUrlGenerator):
                     url_list.append(t_url)
             return url_list
         except KeyError:
-            print 'Error parsing topology json'
+            print('Error parsing topology json')
             return []
 
 
@@ -278,7 +278,7 @@ class InvUrlGenerator(TestUrlGenerator):
                     url_list.append(i_url)
             return url_list
         except KeyError:
-            print 'Error parsing inventory json'
+            print('Error parsing inventory json')
             return []
 
 
@@ -311,7 +311,7 @@ if __name__ == "__main__":
         tg = TopoUrlGenerator(in_args.host, in_args.port, in_args.auth)
         topo_urls += tg.generate()
         if len(topo_urls) == 0:
-            print 'Failed to generate topology URLs'
+            print('Failed to generate topology URLs')
             sys.exit(-1)
 
     # If required, get inventory resource URLs
@@ -319,32 +319,32 @@ if __name__ == "__main__":
         ig = InvUrlGenerator(in_args.host, in_args.port, in_args.auth)
         inv_urls += ig.generate()
         if len(inv_urls) == 0:
-            print 'Failed to generate inventory URLs'
+            print('Failed to generate inventory URLs')
             sys.exit(-1)
 
     if in_args.resource == 'topo+inv' or in_args.resource == 'all':
         # To have balanced test results, the number of URLs for topology and inventory must be the same
         if len(topo_urls) != len(inv_urls):
-            print "The number of topology and inventory URLs don't match"
+            print("The number of topology and inventory URLs don't match")
             sys.exit(-1)
 
     st = ShardPerformanceTester(in_args.host, in_args.port, in_args.auth, in_args.threads, in_args.requests,
                                 in_args.plevel)
 
     if in_args.resource == 'all' or in_args.resource == 'topo':
-        print '==================================='
-        print 'Testing topology shard performance:'
-        print '==================================='
+        print('===================================')
+        print('Testing topology shard performance:')
+        print('===================================')
         st.run_test(topo_urls)
 
     if in_args.resource == 'all' or in_args.resource == 'inv':
-        print '===================================='
-        print 'Testing inventory shard performance:'
-        print '===================================='
+        print('====================================')
+        print('Testing inventory shard performance:')
+        print('====================================')
         st.run_test(inv_urls)
 
     if in_args.resource == 'topo+inv' or in_args.resource == 'all':
-        print '==============================================='
-        print 'Testing combined shards (topo+inv) performance:'
-        print '==============================================='
+        print('===============================================')
+        print('Testing combined shards (topo+inv) performance:')
+        print('===============================================')
         st.run_test(topo_urls + inv_urls)
index 95b008dc54248fc5056b9a7f15da6f7361d84610..5f4973ba79329f68e3d6df9b0040da7da00bd277 100644 (file)
@@ -69,8 +69,8 @@ $ENTRIES
         data = patch_data_template.substitute(mapping)
         response = session.put(url=url, auth=auth, headers=headers, data=data)
         if response.status_code not in [200, 201, 204]:
-            print "status: {}".format(response.status_code)
-            print "text: {}".format(response.text)
+            print("status: {}".format(response.status_code))
+            print("text: {}".format(response.text))
             sys.exit(1)
 
 
index b3731bc7319e645390dcebf9a22eeac2f33280f5..24a0fdc77e5bcc9b6e4e82ecf1e9d44f643ebf14 100644 (file)
@@ -113,16 +113,17 @@ class OvsdbConfigBlaster (object):
                 'node-id': 'ovsdb://%s:%s'
                            % (vswitch_ip,
                               vswitch_ovsdb_port),
-                'post-url': urlprefix +
-                OvsdbConfigBlaster.return_ovsdb_url(
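+                # All three URLs share the same base: urlprefix plus the node's ovsdb path.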
+                'post-url': urlprefix
+                + OvsdbConfigBlaster.return_ovsdb_url(
                     vswitch_ip,
                     vswitch_ovsdb_port),
-                'get-config-url': urlprefix +
-                OvsdbConfigBlaster.return_ovsdb_url(
+                'get-config-url': urlprefix
+                + OvsdbConfigBlaster.return_ovsdb_url(
                     vswitch_ip,
                     vswitch_ovsdb_port),
-                'get-oper-url': urlprefix +
-                OvsdbConfigBlaster.return_ovsdb_url(
+                'get-oper-url': urlprefix
+                + OvsdbConfigBlaster.return_ovsdb_url(
                     vswitch_ip,
                     vswitch_ovsdb_port)}})
 
@@ -188,9 +188,9 @@ class OvsdbConfigBlaster (object):
             }
             self.send_rest(self.session,
                            self.vswitch_dict[vswitch_name]
-                           .get('post-url') +
-                           '%2Fbridge%2F' +
-                           bridge_name,
+                           .get('post-url')
+                           + '%2Fbridge%2F'
+                           + bridge_name,
                            add_bridge_body)
         self.session.close()
 
@@ -262,9 +262,9 @@ class OvsdbConfigBlaster (object):
                 bridge_name = unicode('br-' + str(br_num) + '-test')
                 self.send_rest_del(self.session,
                                    self.vswitch_dict[vswitch_names]
-                                   .get('post-url') +
-                                   '%2Fbridge%2F' +
-                                   bridge_name)
+                                   .get('post-url')
+                                   + '%2Fbridge%2F'
+                                   + bridge_name)
             self.session.close()
 
     def delete_port(self, num_ports):
@@ -387,5 +387,5 @@ if __name__ == "__main__":
         else:
             ovsdb_config_blaster.add_port()
     else:
-        print "please use: python ovsdbconfigblaster.py --help " \
-              "\nUnsupported mode: ", args.mode
+        print("please use: python ovsdbconfigblaster.py --help "
+              "\nUnsupported mode: ", args.mode)
index e5a0c427447b741c4739f1bd45f86a020777ed84..159398fae08f902fb0bbf03be0494ec7a9bd5844 100644 (file)
@@ -121,7 +121,6 @@ def iterable_msg(pccs, lsps, workers, hop):
             list_data[1] = pcc_ip
             list_data[4] = pcc_ip
             whole_data = ''.join(list_data)
-            # print 'DEBUG:', whole_data + '\n'
             worker = (lsp * pccs + pcc) % workers
             post_kwargs = {"data": whole_data, "headers": headers}
             yield worker, post_kwargs
@@ -147,15 +146,11 @@ def queued_send(session, queue_messages, queue_responses):
 
 def classify(resp_tuple):
     """Return 'pass' or a reason what is wrong with response."""
-    # print 'DEBUG: received', response
     prepend = ''
     status = resp_tuple[0]
-    # print 'DEBUG: verifying status', status
     if (status != 200) and (status != 204):  # is it int?
-        # print 'DEBUG:', response.content
         prepend = 'status: ' + str(status) + ' '
     content = resp_tuple[1]
-    # print 'DEBUG: verifying content', content
     if prepend or (content != expected and content != ''):
         return prepend + 'content: ' + str(content)
     return 'pass'
@@ -164,7 +159,6 @@ def classify(resp_tuple):
 # Main.
 list_q_msg = [collections.deque() for _ in range(args.workers)]
 for worker, post_kwargs in iterable_msg(args.pccs, args.lsps, args.workers, args.hop):
-    # print 'DEBUG: worker', repr(worker), 'message', repr(message)
     list_q_msg[worker].append(post_kwargs)
 queue_responses = collections.deque()  # thread safe
 threads = []
@@ -177,7 +171,7 @@ for worker in range(args.workers):
     threads.append(thread)
 tasks = sum(map(len, list_q_msg))  # fancy way of counting, should equal to pccs*lsps.
 counter = CounterDown(tasks)
-print 'work is going to start with', tasks, 'tasks'
+print('work is going to start with %s tasks' % tasks)
 time_start = time.time()
 for thread in threads:
     thread.start()
@@ -206,12 +200,10 @@ while 1:
             continue
         left = len(queue_responses)
         if left:
-            print 'error: more responses left inqueue', left
+            print('error: more responses left in queue', left)
     else:
-        print 'Time is up!'
+        print('Time is up!')
         left = len(queue_responses)  # can be still increasing
-        # if left:
-        #     print 'WARNING: left', left
         for _ in range(left):
             resp_tuple = queue_responses.popleft()  # thread safe
             result = classify(resp_tuple)
@@ -219,7 +211,7 @@ while 1:
     break  # may leave late items in queue_reponses
 time_stop = time.time()
 timedelta_duration = time_stop - time_start
-print 'took', timedelta_duration
-print repr(counter.counter)
+print('took', timedelta_duration)
+print(repr(counter.counter))
 # for message in debug_list:
 #     print message
index 2065064e82a998d23086315cd63f88ef65909b5a..30586783753c4b467cd363a0303f98d3ada6e8dd 100755 (executable)
@@ -355,7 +355,7 @@ if args.graphs or args.all_graphs:
         pyplot.subplots_adjust(hspace=.7)
     else:
         pyplot.subplots_adjust(hspace=.7)
-        print "WARNING: That's a lot of graphs. Add a second column?"
+        print("WARNING: That's a lot of graphs. Add a second column?")
     pyplot.show()
 
 # Print stats