From b2cd74063cbcdfadcbe20746116ec276e27ea0cb Mon Sep 17 00:00:00 2001 From: Jamo Luhrsen Date: Thu, 26 Mar 2020 17:22:06 -0700 Subject: [PATCH] Fix Flake8 errors flake8 now running with python3 was turning up a lot of errors about print statements. This fixes some of those errors, and seems to be enough to get flake8 passing again. Signed-off-by: Jamo Luhrsen Change-Id: Ie02f5266391702e425147b2981aad4bb828bdb0a Signed-off-by: Jamo Luhrsen --- csit/libraries/AAAJsonUtils.py | 16 ++-- csit/libraries/AuthStandalone.py | 2 - csit/libraries/ClusterStateLibrary.py | 12 +-- csit/libraries/Counter.py | 2 +- csit/libraries/CrudLibrary.py | 2 +- csit/libraries/DynamicMininet.py | 52 ++++++------- csit/libraries/JsonGenerator.py | 10 +-- .../libraries/MininetTopo/create_fullymesh.py | 12 +-- csit/libraries/ScaleClient.py | 3 +- csit/libraries/Topology.py | 4 +- csit/libraries/Topologynew.py | 5 +- csit/libraries/UtilLibrary.py | 37 +++++----- csit/libraries/VsctlListParser.py | 2 +- csit/libraries/XmlComparator.py | 11 --- csit/libraries/backuprestore/JsonDiffTool.py | 4 +- csit/libraries/backuprestore/jsonpathl.py | 32 ++++---- csit/libraries/ipaddr.py | 2 +- .../common_scripts/dpdumpflows.py | 2 +- .../common_scripts/infrastructure_launch.py | 54 +++++++------- .../lacp/Lacp_Feature_OF13/LACP_custom1.py | 4 +- .../transaction-tracking/process.py | 8 +- tools/clustering/cluster-deployer/deploy.py | 62 ++++++++-------- .../cluster-deployer/remote_host.py | 8 +- tools/fastbgp/play.py | 5 +- tools/mdsal_benchmark/dsbenchmark.py | 66 ++++++++--------- tools/mdsal_benchmark/ntfbenchmark.py | 12 +-- tools/mdsal_benchmark/rpcbenchmark.py | 8 +- tools/netconf_tools/configurer.py | 2 +- tools/netconf_tools/getter.py | 4 +- .../mapping_blaster.py | 6 +- .../config_cleanup.py | 6 +- .../create_plot_data_files.py | 4 +- .../flow_add_delete_test.py | 30 ++++---- .../flow_config_blaster.py | 73 +++++++++---------- .../flow_config_blaster_bulk.py | 11 ++- .../flow_config_blaster_fle.py | 14 ++-- .../flow_stats_stability_monitor.py | 26 +++---- .../inventory_crawler.py | 40 +++++----- .../inventory_perf.py | 22 +++--- .../inventory_read_blaster.py | 8 +- .../clustering-performance-test/odl_tester.py | 41 +++++------ .../clustering-performance-test/onos_stats.py | 23 ++---- .../onos_tester.py | 52 +++++-------- .../pretty_print.py | 2 +- .../shard_perf_test.py | 60 +++++++-------- .../replace_cars.py | 4 +- .../ovsdbconfigblaster.py | 28 +++---- tools/pcep_updater/updater.py | 18 ++--- tools/wcbench/stats.py | 2 +- 49 files changed, 422 insertions(+), 491 deletions(-) diff --git a/csit/libraries/AAAJsonUtils.py b/csit/libraries/AAAJsonUtils.py index 3db0088a54..80391e35e9 100644 --- a/csit/libraries/AAAJsonUtils.py +++ b/csit/libraries/AAAJsonUtils.py @@ -29,7 +29,7 @@ def countnodes(args): try: jsonobj = json.loads(args['jsonblob']) except KeyError: - print "countnodes: json blob to parse not found" + print("countnodes: json blob to parse not found") raise if 'subnode' in args: @@ -159,13 +159,13 @@ def get_id_by_name(args): try: jsonobj = json.loads(str(args['jsonblob'])) except KeyError: - print "get_id_by_name: json blob not specified:" + print("get_id_by_name: json blob not specified:") raise try: name = args['name'] except KeyError: - print "get_id_by_name: name [usr, domain, role] not specified in args" + print("get_id_by_name: name [usr, domain, role] not specified in args") raise if 'head' in args: @@ -178,7 +178,7 @@ def get_id_by_name(args): try: datatype = args['typeval'] except KeyError: - print 
"get_id_by_name: need a type arg to process correct name for id" + print("get_id_by_name: need a type arg to process correct name for id") raise try: @@ -231,13 +231,13 @@ def get_attribute_by_id(args): try: jsonobj = json.loads(args['jsonblob']) except KeyError: - print "get_attribute_by_id: json blob not specified:" + print("get_attribute_by_id: json blob not specified:") raise try: nodeid = args['id'] except KeyError: - print "get_attribute_by_id: id to look for not specified in parameters" + print("get_attribute_by_id: id to look for not specified in parameters") raise if 'attr' in args: @@ -261,13 +261,13 @@ def get_attribute_by_id(args): try: datatype = args['typeval'] except KeyError: - print "get_attribute_by_id: need type arg to process name for id" + print("get_attribute_by_id: need type arg to process name for id") raise try: size = args['size'] except KeyError: - print "get_attribute_by_id: specify number of records we need" + print("get_attribute_by_id: specify number of records we need") raise typename = datatype + 'id' diff --git a/csit/libraries/AuthStandalone.py b/csit/libraries/AuthStandalone.py index 2435423bce..1ba99c1f17 100644 --- a/csit/libraries/AuthStandalone.py +++ b/csit/libraries/AuthStandalone.py @@ -152,7 +152,6 @@ class _TokenReusingSession(object): raise RuntimeError("Parse failed: " + resp.text) self.token = token # TODO: Use logging so that callers could see token refreshes. - # print "DEBUG: token:", token # We keep self.session to use for the following restconf requests. def oneshot_method(self, method, uri, **kwargs): @@ -200,7 +199,6 @@ class _TokenClosingSession(object): raise RuntimeError("Parse failed: " + resp.text) self.token = token # TODO: Use logging so that callers could see token refreshes. - # print "DEBUG: token:", token # We keep self.session to use for the following restconf requests. 
def oneshot_method(self, method, uri, **kwargs): diff --git a/csit/libraries/ClusterStateLibrary.py b/csit/libraries/ClusterStateLibrary.py index ab5a643bb2..815e6e3dcf 100644 --- a/csit/libraries/ClusterStateLibrary.py +++ b/csit/libraries/ClusterStateLibrary.py @@ -22,18 +22,18 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI for ip in ips: i = 1 dict[ip] = None - print "numOfShards => " + str(numOfShards) + print("numOfShards => ", str(numOfShards)) while i <= numOfShards: shardMemberName = "member-" + str(i) + "-" + shardName j = 1 - print 'j => ' + str(j) - print 'numOfTries => ' + str(numOfTries) + print('j => ', str(j)) + print('numOfTries => ', str(numOfTries)) while int(j) <= int(numOfTries): print("Try number " + str(j)) try: print("getting role of " + ip + " for shardName = " + shardMemberName) url = SettingsLibrary.getJolokiaURL(ip, str(port), str(i), shardName) - print url + print(url) resp = UtilLibrary.get(url) print(resp) if resp.status_code != 200: @@ -92,7 +92,7 @@ def getFollowers(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSe for ip in dict.keys(): if dict[ip] == 'Follower': result.append(ip) - print "i=", i, "result=", result + print("i=%s result=%s" % (i, result)) if (len(result) == (len(ips) - 1)): break sleep(1) @@ -116,7 +116,7 @@ def testGetClusterRoles(): def testGetLeader(): leader = getLeader("shard-inventory-config", 3, 1, 1, 8181, "10.194.126.116", "10.194.126.117", "10.194.126.118") - print leader + print(leader) return leader diff --git a/csit/libraries/Counter.py b/csit/libraries/Counter.py index 6740020403..4f327ee3fd 100644 --- a/csit/libraries/Counter.py +++ b/csit/libraries/Counter.py @@ -187,4 +187,4 @@ class Counter(dict): if __name__ == '__main__': import doctest - print doctest.testmod() + print(doctest.testmod()) diff --git a/csit/libraries/CrudLibrary.py b/csit/libraries/CrudLibrary.py index e92bf56116..d5979ea585 100644 --- a/csit/libraries/CrudLibrary.py +++ b/csit/libraries/CrudLibrary.py @@ -142,7 +142,7 @@ def buyCar(hostname, port, numberOfCarBuyers, start=0): """ - print "Buying " + str(numberOfCarBuyers) + " Cars" + print("Buying " + str(numberOfCarBuyers) + " Cars") for x in range(start, start + numberOfCarBuyers): strId = str(x + 1) diff --git a/csit/libraries/DynamicMininet.py b/csit/libraries/DynamicMininet.py index 6e274c6da6..8be374e4ff 100644 --- a/csit/libraries/DynamicMininet.py +++ b/csit/libraries/DynamicMininet.py @@ -53,7 +53,7 @@ class DynamicMininet(cmd.Cmd): :param num: initial number of switches in the topology """ if self._running: - print 'Mininet topology is already active' + print('Mininet topology is already active') return cntl, numsw = line.split() self._topo = mininet.topo.Topo() @@ -68,10 +68,10 @@ class DynamicMininet(cmd.Cmd): def help_start(self): """Provide help message for start command""" - print 'Starts mininet' - print 'Usage: start ' - print '\tcontroller_ip - controllers ip or host name' - print '\tnum - number of switches at start' + print('Starts mininet') + print('Usage: start ') + print('\tcontroller_ip - controllers ip or host name') + print('\tnum - number of switches at start') def do_start_with_cluster(self, line): """Starts mininet network with initial number of switches @@ -81,7 +81,7 @@ class DynamicMininet(cmd.Cmd): e.g. 
1.1.1.1,2.2.2.2,3.3.3.3 (no spaces) """ if self._running: - print 'Mininet topology is already active' + print('Mininet topology is already active') return cntls = line.split(',') @@ -93,7 +93,7 @@ class DynamicMininet(cmd.Cmd): for i, cntl_ip in enumerate(cntls): cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633) controllers.append(cnt) - print "contrller {0} created".format(cnt) + print("contrller {0} created".format(cnt)) self._net.buildFromTopo(topo=self._topo) self._net.start() @@ -101,9 +101,9 @@ class DynamicMininet(cmd.Cmd): def help_start_with_cluster(self): """Provide help message for start_with_cluster command""" - print 'Starts mininet with one switch' - print 'Usage: start ' - print '\tcontroller_ips - comma separated list of controllers ip or host names' + print('Starts mininet with one switch') + print('Usage: start ') + print('\tcontroller_ips - comma separated list of controllers ip or host names') def do_start_switches_with_cluster(self, line): """Starts mininet network with initial number of switches @@ -114,7 +114,7 @@ class DynamicMininet(cmd.Cmd): e.g. 1.1.1.1,2.2.2.2,3.3.3.3 (no spaces) """ if self._running: - print 'Mininet topology is already active' + print('Mininet topology is already active') return num, contls = line.split() cntls = contls.split(',') @@ -127,7 +127,7 @@ class DynamicMininet(cmd.Cmd): for i, cntl_ip in enumerate(cntls): cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633) controllers.append(cnt) - print "contrller {0} created".format(cnt) + print("contrller {0} created".format(cnt)) self._net.buildFromTopo(topo=self._topo) self._net.start() @@ -135,10 +135,10 @@ class DynamicMininet(cmd.Cmd): def help_start_switches_with_cluster(self): """Provide help message for start_with_cluster command""" - print 'Starts mininet with one switch' - print 'Usage: start ' - print '\tswnt - number of switches in topology' - print '\tcontroller_ips - comma separated list of controllers ip or host names' + print('Starts mininet with one switch') + print('Usage: start ') + print('\tswnt - number of switches in topology') + print('\tcontroller_ips - comma separated list of controllers ip or host names') def do_add_switch(self, line): """Adds one switch to the network @@ -157,8 +157,8 @@ class DynamicMininet(cmd.Cmd): def help_add_switch(self): """Provide help message for add_switch command""" - print 'Adds one sinle switch to the running topology' - print 'Usage: add_switch' + print('Adds one sinle switch to the running topology') + print('Usage: add_switch') def do_add_switches(self, line): """Adds switches to the network @@ -170,9 +170,9 @@ class DynamicMininet(cmd.Cmd): def help_add_switches(self): """Provide help message for add_switch command""" - print 'Adds one sinle switch to the running topology' - print 'Usage: add_switches ' - print '\tnum - number of switches tp be added' + print('Adds one sinle switch to the running topology') + print('Usage: add_switches ') + print('\tnum - number of switches tp be added') def do_exit(self, line): """Stops mininet""" @@ -183,8 +183,8 @@ class DynamicMininet(cmd.Cmd): def help_exit(self): """Provide help message for exit command""" - print 'Exit mininet cli' - print 'Usage: exit' + print('Exit mininet cli') + print('Usage: exit') def do_sh(self, line): """Run an external shell command @@ -195,9 +195,9 @@ class DynamicMininet(cmd.Cmd): def help_sh(self, line): """Provide help message for sh command""" - print 'Executes given 
commandAdds one sinle switch to the running topology' - print 'Usage: sh ' - print '\tline - command to be executed(e.g. ps -e' + print('Executes given commandAdds one sinle switch to the running topology') + print('Usage: sh ') + print('\tline - command to be executed(e.g. ps -e') def emptyline(self): pass diff --git a/csit/libraries/JsonGenerator.py b/csit/libraries/JsonGenerator.py index 4722b61439..94caad67bc 100644 --- a/csit/libraries/JsonGenerator.py +++ b/csit/libraries/JsonGenerator.py @@ -64,7 +64,7 @@ def copy_eid(objA, objB): try: setattr(objA, name, value) except AttributeError: - print name, "giving attribute error in", objA + print("%s giving attribute error in %s" % (name, objA)) def copy_rloc(objA, objB): @@ -80,7 +80,7 @@ def copy_rloc(objA, objB): try: setattr(objA, name, value) except AttributeError: - print name, "giving attribute error in", objA + print("%s giving attribute error in %s" % (name, objA)) def clean_hops(obj): @@ -262,7 +262,7 @@ def Get_LispAddress_JSON_And_Wrap_input(eid_string, vni=None): return Wrap_input(Get_LispAddress_JSON(eid_string, vni)) -def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"): +def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"): """ Description: Returns locator record object from pyangbind generated classes Returns: locator record object Params: @@ -290,7 +290,7 @@ def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1" return lrecord_obj -def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=001, loc_id="ISP1"): +def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"): """ Description: Returns locator record dictionary Returns: python dictionary Params: @@ -330,7 +330,7 @@ def Get_MappingRecord_Object(eid, locators, ttl=1440, authoritative=True, action loc_id = loc.keys()[0] loc_obj = loc[loc_id] if loc_id in loc_ids: - print "Locator objects should have different keys" + print("Locator objects should have different keys") break # TODO: Locator-id, currently in the format of loc_id0, loc_id1 mrecord_obj.LocatorRecord.add(loc_id) diff --git a/csit/libraries/MininetTopo/create_fullymesh.py b/csit/libraries/MininetTopo/create_fullymesh.py index b1191a1746..9019ab1adc 100644 --- a/csit/libraries/MininetTopo/create_fullymesh.py +++ b/csit/libraries/MininetTopo/create_fullymesh.py @@ -24,10 +24,10 @@ __created__ = "19 March 2014" if len(sys.argv) < 5: print("Please povide correct inputs. 
Exiting!!!") - print "{0} \ - ".format(sys.argv[0].split('/')[-1]) - print "Dpid of switches is derived from base mac and \ - host ip address is derived from base ip" + print("{0} \ + ".format(sys.argv[0].split('/')[-1])) + print("Dpid of switches is derived from base mac and \ + host ip address is derived from base ip") sys.exit(1) switch_count = int(sys.argv[1]) @@ -99,8 +99,8 @@ if __name__ == "__main__": \nHence generating this python file dynamically\"\"\" \ \nfrom mininet.topo import Topo\nclass DemoTopo(Topo): \ \n'.format(switch_count, switch_count * host_per_switch, sys.argv[0])) - print "This topology has %d switches %d hosts" \ - % (switch_count, switch_count * host_per_switch) + print("This topology has %d switches %d hosts" + % (switch_count, switch_count * host_per_switch)) configfile.write(" def __init__(self):\n ") configfile.write(" # Initialize topology\n") configfile.write(" Topo.__init__(self)\n") diff --git a/csit/libraries/ScaleClient.py b/csit/libraries/ScaleClient.py index fd9f39f1ee..10ec961d6e 100644 --- a/csit/libraries/ScaleClient.py +++ b/csit/libraries/ScaleClient.py @@ -647,7 +647,6 @@ def flow_stats_collected(controller=''): Returns: :returns (switches, flows_reported, flows-found): tupple with counts of switches, reported and found flows """ - # print type(flow_details), flow_details active_flows = 0 found_flows = 0 switches = _get_operational_inventory_of_switches(controller) @@ -659,7 +658,7 @@ def flow_stats_collected(controller=''): active_flows += t['opendaylight-flow-table-statistics:flow-table-statistics']['active-flows'] if 'flow' in t: found_flows += len(t['flow']) - print "Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows + print("Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows) return len(switches), active_flows, found_flows diff --git a/csit/libraries/Topology.py b/csit/libraries/Topology.py index 4c2b066004..b96e48e364 100644 --- a/csit/libraries/Topology.py +++ b/csit/libraries/Topology.py @@ -43,5 +43,5 @@ class Topology(object): if __name__ == '__main__': topology = Topology() - print topology.get_nodes_from_topology(2) - print topology.get_nodes_from_topology('2') + print(topology.get_nodes_from_topology(2)) + print(topology.get_nodes_from_topology('2')) diff --git a/csit/libraries/Topologynew.py b/csit/libraries/Topologynew.py index 80c83f0a8a..a62c429023 100644 --- a/csit/libraries/Topologynew.py +++ b/csit/libraries/Topologynew.py @@ -102,7 +102,4 @@ class Topologynew(object): if __name__ == '__main__': topologynew = Topologynew() - # print topologynew.get_nodes_from_tree_topo(2) - # print topologynew.get_nodes_from_tree_topo('2') - print topologynew.get_nodes_from_tree_topo('(2,3)') - # print topologynew.get_ids_of_leaf_nodes(2,2 )#, depth) + print(topologynew.get_nodes_from_tree_topo('(2,3)')) diff --git a/csit/libraries/UtilLibrary.py b/csit/libraries/UtilLibrary.py index e244c33495..5e0ec10941 100644 --- a/csit/libraries/UtilLibrary.py +++ b/csit/libraries/UtilLibrary.py @@ -112,13 +112,13 @@ def execute_ssh_command(ip, username, password, command): use username and password of controller server for ssh and need karaf distribution location like /root/Documents/dist """ - print "executing ssh command" + print("executing ssh command") lib = SSHLibrary() lib.open_connection(ip) lib.login(username=username, password=password) - print "login done" + print("login done") cmd_response = lib.execute_command(command) - print "command executed : " + command + print("command 
executed : " + command) lib.close_connection() return cmd_response @@ -127,22 +127,22 @@ def wait_for_controller_up(ip, port="8181"): url = "http://" + ip + ":" + str(port) + \ "/restconf/config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules" - print "Waiting for controller " + ip + " up." + print("Waiting for controller " + ip + " up.") # Try 30*10s=5 minutes for the controller to be up. for i in xrange(30): try: - print "attempt " + str(i) + " to url " + url + print("attempt %s to url %s" % (str(i), url)) resp = get(url, "admin", "admin") - print "attempt " + str(i) + " response is " + str(resp) - print resp.text + print("attempt %s response is %s" % (str(i), str(resp))) + print(resp.text) if ('clustering-it-provider' in resp.text): - print "Wait for controller " + ip + " succeeded" + print("Wait for controller " + ip + " succeeded") return True except Exception as e: - print e + print(e) time.sleep(10) - print "Wait for controller " + ip + " failed" + print("Wait for controller " + ip + " failed") return False @@ -192,7 +192,6 @@ def wait_for_controller_stopped(ip, username, password, karafHome): i = 1 while i <= tries: stdout = lib.execute_command("ps -axf | grep karaf | grep -v grep | wc -l") - # print "stdout: "+stdout processCnt = stdout[0].strip('\n') print("processCnt: " + processCnt) if processCnt == '0': @@ -203,7 +202,7 @@ def wait_for_controller_stopped(ip, username, password, karafHome): lib.close_connection() if i > tries: - print "Killing controller" + print("Killing controller") kill_controller(ip, username, password, karafHome) @@ -234,7 +233,7 @@ def isolate_controller(controllers, username, password, isolated): cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP' execute_ssh_command(isolated_controller, username, password, cmd_str) ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L') - print ip_tables + print(ip_tables) iso_result = 'pass' for controller in controllers: controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*" @@ -266,7 +265,7 @@ def rejoin_controller(controllers, username, password, isolated): cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP' execute_ssh_command(isolated_controller, username, password, cmd_str) ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L') - print ip_tables + print(ip_tables) iso_result = 'pass' for controller in controllers: controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*" @@ -290,18 +289,18 @@ def flush_iptables(controllers, username, password): """ flush_result = 'pass' for controller in controllers: - print 'Flushing ' + controller + print('Flushing ', controller) cmd_str = 'sudo iptables -v -F' cmd_result = execute_ssh_command(controller, username, password, cmd_str) - print cmd_result + print(cmd_result) success_string = "Flushing chain `INPUT'" + "\n" success_string += "Flushing chain `FORWARD'" + "\n" success_string += "Flushing chain `OUTPUT'" if not cmd_result == success_string: flush_result = "Failed to flush IPTables. Check Log." - print "." - print "." - print "." 
+ print(".") + print(".") + print(".") return flush_result diff --git a/csit/libraries/VsctlListParser.py b/csit/libraries/VsctlListParser.py index e68486dea5..531d23e1b0 100644 --- a/csit/libraries/VsctlListParser.py +++ b/csit/libraries/VsctlListParser.py @@ -22,7 +22,7 @@ def _parse_stdout(stdout): regroups = re.finditer(pat, text) outdict = {} for g in regroups: - print g.group() + print(g.group()) if g.group('key') == '_uuid': cntl_uuid = g.group('value') outdict[cntl_uuid] = {} diff --git a/csit/libraries/XmlComparator.py b/csit/libraries/XmlComparator.py index 8ad386e2e5..7c2617539c 100644 --- a/csit/libraries/XmlComparator.py +++ b/csit/libraries/XmlComparator.py @@ -218,8 +218,6 @@ class XmlComparator: nodeDict = XMLtoDictParserTools.parseTreeToDict(node) XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict) index += 1 - # print nodeDict - # print origDict if nodeDict == origDict: return True, '' if nodeDict['flow']['priority'] == origDict['flow']['priority']: @@ -229,7 +227,6 @@ class XmlComparator: def is_flow_operational2(self, requested_flow, oper_resp, check_id=False): def _rem_unimplemented_tags(tagpath, recurs, tdict): - # print "_rem_unimplemented_tags", tagpath, tdict if len(tagpath) > 1 and tagpath[0] in tdict: _rem_unimplemented_tags(tagpath[1:], recurs, tdict[tagpath[0]]) @@ -246,11 +243,9 @@ class XmlComparator: del tdict[tagpath[0]] if tdict.keys() == ['order']: del tdict['order'] - # print "leaving", tdict def _add_tags(tagpath, newtag, value, tdict): '''if whole tagpath exists and the tag is not present, it is added with given value''' - # print "_add_tags", tagpath, newtag, value, tdict if len(tagpath) > 0 and tagpath[0] in tdict: _add_tags(tagpath[1:], newtag, value, tdict[tagpath[0]]) elif len(tagpath) == 0 and newtag not in tdict: @@ -258,7 +253,6 @@ class XmlComparator: def _to_be_modified_tags(tagpath, tag, related_tag, tdict): '''if whole tagpath exists and the tag is not present, it is added with given value''' - # print "_to_be_modified_tags", tagpath, tag, related_tag, tdict if len(tagpath) > 0 and tagpath[0] in tdict: _to_be_modified_tags(tagpath[1:], tag, related_tag, tdict[tagpath[0]]) elif len(tagpath) == 0 and tag in tdict and related_tag in tdict: @@ -284,9 +278,6 @@ class XmlComparator: ignoreList=IGNORED_TAGS_LIST) XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict) index += 1 - # print nodeDict - # print origDict - # print reportDict if nodeDict == origDict: return True, '' if nodeDict['flow']['priority'] == origDict['flow']['priority']: @@ -298,8 +289,6 @@ class XmlComparator: for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC: _to_be_modified_tags(p, t, rt, td) - # print "comparing1", nodeDict - # print "comparing2", td if nodeDict == td: return True, '' if nodeDict == origDict: diff --git a/csit/libraries/backuprestore/JsonDiffTool.py b/csit/libraries/backuprestore/JsonDiffTool.py index 0d24764bca..b60126ac8e 100644 --- a/csit/libraries/backuprestore/JsonDiffTool.py +++ b/csit/libraries/backuprestore/JsonDiffTool.py @@ -141,9 +141,9 @@ def prefilter_json_files_then_compare(args): if args.printDifferences: for patchline in differences_after_patching: - print json.dumps(patchline) + print(json.dumps(patchline)) - print len(differences_after_patching) + print(len(differences_after_patching)) return len(differences_after_patching) diff --git a/csit/libraries/backuprestore/jsonpathl.py b/csit/libraries/backuprestore/jsonpathl.py index bf84e8090e..76ebf8a943 100644 --- a/csit/libraries/backuprestore/jsonpathl.py +++ 
b/csit/libraries/backuprestore/jsonpathl.py @@ -71,7 +71,6 @@ def normalize(x): g1 = m.group(1) subx.append(g1) ret = "[#%d]" % n - # print "f1:", g1, ret return ret x = re.sub(r"[\['](\??\(.*?\))[\]']", f1, x) @@ -86,7 +85,6 @@ def normalize(x): # put expressions back def f2(m): g1 = m.group(1) - # print "f2:", g1 return subx[int(g1)] x = re.sub(r"#([0-9]+)", f2, x) @@ -130,17 +128,17 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): def trace(expr, obj, path): if debug: - print "trace", expr, "/", path + print("trace", expr, "/", path) if expr: x = expr.split(';') loc = x[0] x = ';'.join(x[1:]) if debug: - print "\t", loc, type(obj) + print("\t", loc, type(obj)) if loc == "*": def f03(key, loc, expr, obj, path): if debug > 1: - print "\tf03", key, loc, expr, path + print("\tf03", key, loc, expr, path) trace(s(key, expr), obj, path) walk(loc, x, obj, path, f03) @@ -149,7 +147,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): def f04(key, loc, expr, obj, path): if debug > 1: - print "\tf04", key, loc, expr, path + print("\tf04", key, loc, expr, path) if isinstance(obj, dict): if key in obj: trace(s('..', expr), obj[key], s(path, key)) @@ -175,7 +173,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): # [(index_expression)] if loc.startswith("(") and loc.endswith(")"): if debug > 1: - print "index", loc + print("index", loc) e = evalx(loc, obj) trace(s(e, x), obj, path) return @@ -183,11 +181,11 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): # ?(filter_expression) if loc.startswith("?(") and loc.endswith(")"): if debug > 1: - print "filter", loc + print("filter", loc) def f05(key, loc, expr, obj, path): if debug > 1: - print "f05", key, loc, expr, path + print("f05", key, loc, expr, path) if isinstance(obj, dict): eval_result = evalx(loc, obj[key]) else: @@ -240,7 +238,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): # [index,index....] for piece in re.split(r"'?,'?", loc): if debug > 1: - print "piece", piece + print("piece", piece) trace(s(piece, x), obj, path) else: store(path, obj) @@ -257,7 +255,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): """eval expression""" if debug: - print "evalx", loc + print("evalx", loc) # a nod to JavaScript. doesn't work for @.name.name.length # Write len(@.name.name) instead!!! @@ -299,20 +297,20 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True): loc = re.sub(r'(?", v + print("->", v) return v # body of jsonpath() diff --git a/csit/libraries/ipaddr.py b/csit/libraries/ipaddr.py index 7208218683..59a5e89cb7 100644 --- a/csit/libraries/ipaddr.py +++ b/csit/libraries/ipaddr.py @@ -1461,7 +1461,7 @@ class _BaseV6(object): try: # Now, parse the hextets into a 128-bit integer. 
- ip_int = 0L + ip_int = 0 for i in xrange(parts_hi): ip_int <<= 16 ip_int |= self._parse_hextet(parts[i]) diff --git a/csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py b/csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py index 5a61ffd289..7bcf6709de 100644 --- a/csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py +++ b/csit/suites/groupbasedpolicy/common_scripts/dpdumpflows.py @@ -12,4 +12,4 @@ def call_dpctl(): if __name__ == "__main__": flows = call_dpctl().split("recirc_id") for flow in flows: - print flow + print(flow) diff --git a/csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py b/csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py index 4a4bd866e1..bd7c6aeb01 100644 --- a/csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py +++ b/csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py @@ -42,7 +42,7 @@ def add_controller(sw, ip): try: socket.inet_aton(ip) except socket.error: - print "Error: %s is not a valid IPv4 address of controller!" % ip + print("Error: %s is not a valid IPv4 address of controller!" % (ip)) os.exit(2) call(['sudo', 'ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip]) @@ -61,7 +61,7 @@ def add_manager(ip): try: socket.inet_aton(ip) except socket.error: - print "Error: %s is not a valid IPv4 address of manager!" % ip + print("Error: %s is not a valid IPv4 address of manager!" % (ip)) os.exit(2) cmd = ['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % ip] @@ -85,7 +85,7 @@ def add_switch(name, dpid=None): # prepending zeros to match 16-byt length, e.g. 123 -> 0000000000000123 dpid = filler[:len(filler) - len(dpid)] + dpid elif len(dpid) > 16: - print 'DPID: %s is too long' % dpid + print('DPID: %s is too long' % dpid) sys.exit(3) call(['sudo', 'ovs-vsctl', 'set', 'bridge', name, 'other-config:datapath-id=%s' % dpid]) @@ -265,57 +265,57 @@ def launch(switches, hosts, odl_ip='127.0.0.1'): connect_container_to_switch( sw['name'], host, containerID) host['port-name'] = 'vethl-' + host['name'] - print "Created container: %s with IP: %s. Connect using docker attach %s," \ - "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name']) + print("Created container: %s with IP: %s. Connect using docker attach %s," + "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name'])) if __name__ == "__main__": if len(sys.argv) < 2 or len(sys.argv) > 3: - print "Please, specify IP of ODL and switch index in arguments." - print "usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX" + print("Please, specify IP of ODL and switch index in arguments.") + print("usage: ./infrastructure_launch.py ODL_IP SWITCH_INDEX") sys.exit(2) controller = sys.argv[1] try: socket.inet_aton(controller) except socket.error: - print "Error: %s is not a valid IPv4 address!" % controller + print("Error: %s is not a valid IPv4 address!" % (controller)) sys.exit(2) sw_index = int(sys.argv[2]) - print sw_index - print switches[sw_index] + print(sw_index) + print(switches[sw_index]) if sw_index not in range(0, len(switches) + 1): - print len(switches) + 1 - print "Error: %s is not a valid switch index!" % sw_index + print(len(switches) + 1) + print("Error: %s is not a valid switch index!" % (sw_index)) sys.exit(2) sw_type = switches[sw_index]['type'] sw_name = switches[sw_index]['name'] if sw_type == 'gbp': - print "*****************************" - print "Configuring %s as a GBP node." 
% sw_name - print "*****************************" + print("*****************************") + print("Configuring %s as a GBP node." % (sw_name)) + print("*****************************") print launch([switches[sw_index]], hosts, controller) - print "*****************************" - print "OVS status:" - print "-----------" + print("*****************************") + print("OVS status:") + print("-----------") print call(['sudo', 'ovs-vsctl', 'show']) print - print "Docker containers:" - print "------------------" + print("Docker containers:") + print("------------------") call(['docker', 'ps']) - print "*****************************" + print("*****************************") elif sw_type == 'sff': - print "*****************************" - print "Configuring %s as an SFF." % sw_name - print "*****************************" + print("*****************************") + print("Configuring %s as an SFF." % (sw_name)) + print("*****************************") call(['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % controller]) print elif sw_type == 'sf': - print "*****************************" - print "Configuring %s as an SF." % sw_name - print "*****************************" + print("*****************************") + print("Configuring %s as an SF." % (sw_name)) + print("*****************************") call(['%s/sf-config.sh' % os.path.dirname(os.path.realpath(__file__)), '%s' % sw_name]) diff --git a/csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py b/csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py index e2386d08b5..1234c78458 100644 --- a/csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py +++ b/csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py @@ -54,8 +54,8 @@ class LacpTopo(Topo): net.build() s1.start([c0]) s1.cmd('sudo ovs-vsctl set bridge s1 protocols=OpenFlow13') - print h1.cmd('./h1-bond0.sh') - print h2.cmd('./h2-bond0.sh') + print(h1.cmd('./h1-bond0.sh')) + print(h2.cmd('./h2-bond0.sh')) CLI(net) net.stop() diff --git a/tools/clustering/cluster-debugging/transaction-tracking/process.py b/tools/clustering/cluster-debugging/transaction-tracking/process.py index d69a3cc16f..92678d14b1 100644 --- a/tools/clustering/cluster-debugging/transaction-tracking/process.py +++ b/tools/clustering/cluster-debugging/transaction-tracking/process.py @@ -91,14 +91,14 @@ def filterTransactionsByTimeToComplete(timeToComplete): totalTime = 0 for txn in txns: if txns[txn].totalTime() > timeToComplete: - print txns[txn] + print(txns[txn]) totalTime += txns[txn].totalTime() - print "Total time for these transactions = " + unicode(totalTime) + print("Total time for these transactions = ", unicode(totalTime)) def csv(): txns = processFiles() - print Transaction.csv_header() + print(Transaction.csv_header()) for txn in txns: - print txns[txn].csv() + print(txns[txn].csv()) diff --git a/tools/clustering/cluster-deployer/deploy.py b/tools/clustering/cluster-deployer/deploy.py index a9819e0cfc..401dcb630c 100755 --- a/tools/clustering/cluster-deployer/deploy.py +++ b/tools/clustering/cluster-deployer/deploy.py @@ -155,11 +155,11 @@ class Deployer: distribution_name) # noqa if distribution_ver is None: - print distribution_name + " is not a valid distribution version." \ - " (Must contain version in the form: " \ - "\"<#>.<#>.<#>-\" or \"<#>.<#>." \ - "<#>--SR<#>\" or \"<#>.<#>.<#>" \ - "--RC<#>\", e.g. 0.2.0-SNAPSHOT)" + print("%s is not a valid distribution version." + " (Must contain version in the form: " + "\"<#>.<#>.<#>-\" or \"<#>.<#>." + "<#>--SR<#>\" or \"<#>.<#>.<#>" + "--RC<#>\", e.g. 
0.2.0-SNAPSHOT)" % distribution_name) sys.exit(1) distribution_ver = distribution_ver.group() @@ -206,35 +206,35 @@ class Deployer: # Copy the distribution to the host and unzip it odl_file_path = self.dir_name + "/odl.zip" self.remote.copy_file(self.distribution, odl_file_path) - self.remote.exec_cmd("unzip -o " + odl_file_path + " -d " + - self.dir_name + "/") + self.remote.exec_cmd("unzip -o " + odl_file_path + " -d " + + self.dir_name + "/") # Rename the distribution directory to odl - self.remote.exec_cmd("mv " + self.dir_name + "/" + - distribution_name + " " + self.dir_name + "/odl") + self.remote.exec_cmd("mv " + self.dir_name + "/" + + distribution_name + " " + self.dir_name + "/odl") # Copy all the generated files to the server - self.remote.mkdir(self.dir_name + - "/odl/configuration/initial") - self.remote.copy_file(akka_conf, self.dir_name + - "/odl/configuration/initial/") - self.remote.copy_file(module_shards_conf, self.dir_name + - "/odl/configuration/initial/") - self.remote.copy_file(modules_conf, self.dir_name + - "/odl/configuration/initial/") - self.remote.copy_file(features_cfg, self.dir_name + - "/odl/etc/") - self.remote.copy_file(jolokia_xml, self.dir_name + - "/odl/deploy/") - self.remote.copy_file(management_cfg, self.dir_name + - "/odl/etc/") + self.remote.mkdir(self.dir_name + + "/odl/configuration/initial") + self.remote.copy_file(akka_conf, self.dir_name + + "/odl/configuration/initial/") + self.remote.copy_file(module_shards_conf, self.dir_name + + "/odl/configuration/initial/") + self.remote.copy_file(modules_conf, self.dir_name + + "/odl/configuration/initial/") + self.remote.copy_file(features_cfg, self.dir_name + + "/odl/etc/") + self.remote.copy_file(jolokia_xml, self.dir_name + + "/odl/deploy/") + self.remote.copy_file(management_cfg, self.dir_name + + "/odl/etc/") if datastore_cfg is not None: self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/") # Add symlink - self.remote.exec_cmd("ln -sfn " + self.dir_name + " " + - args.rootdir + "/deploy/current") + self.remote.exec_cmd("ln -sfn " + self.dir_name + " " + + args.rootdir + "/deploy/current") # Run karaf self.remote.start_controller(self.dir_name) @@ -243,11 +243,11 @@ class Deployer: def main(): # Validate some input if os.path.exists(args.distribution) is False: - print args.distribution + " is not a valid file" + print("%s is not a valid file" % args.distribution) sys.exit(1) if os.path.exists(os.getcwd() + "/templates/" + args.template) is False: - print args.template + " is not a valid template" + print("%s is not a valid template" % args.template) # Prepare some 'global' variables hosts = args.hosts.split(",") @@ -260,10 +260,10 @@ def main(): replicas = {} for x in range(0, len(hosts)): - ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" + - hosts[x] + ":2550") - rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" + - hosts[x] + ":2551") + ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@" + + hosts[x] + ":2550") + rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" + + hosts[x] + ":2551") all_replicas.append("member-" + str(x + 1)) for x in range(0, 10): diff --git a/tools/clustering/cluster-deployer/remote_host.py b/tools/clustering/cluster-deployer/remote_host.py index cb5d1818dc..c85df36f86 100644 --- a/tools/clustering/cluster-deployer/remote_host.py +++ b/tools/clustering/cluster-deployer/remote_host.py @@ -23,7 +23,7 @@ class RemoteHost: self.lib.close_connection() def exec_cmd(self, command): - print "Executing command " + command + " on host " + 
self.host + print("Executing command %s on host %s" % (command, self.host)) rc = self.lib.execute_command(command, return_rc=True) if rc[1] != 0: raise Exception('remote command failed [{0}] with exit code {1}.' @@ -35,14 +35,14 @@ class RemoteHost: def copy_file(self, src, dest): if src is None: - print "src is None not copy anything to " + dest + print("src is None not copy anything to ", dest) return if os.path.exists(src) is False: - print "Src file " + src + " was not found" + print("Src file " + src + " was not found") return - print "Copying " + src + " to " + dest + " on " + self.host + print("Copying %s to %s on %s" % (src, dest, self.host)) self.lib.put_file(src, dest) def kill_controller(self): diff --git a/tools/fastbgp/play.py b/tools/fastbgp/play.py index d82cbab007..b75aa2de4e 100755 --- a/tools/fastbgp/play.py +++ b/tools/fastbgp/play.py @@ -186,7 +186,7 @@ def parse_arguments(): parser.add_argument("--skipattr", default=False, action="store_true", help=str_help) arguments = parser.parse_args() if arguments.multiplicity < 1: - print "Multiplicity", arguments.multiplicity, "is not positive." + print("Multiplicity", arguments.multiplicity, "is not positive.") raise SystemExit(1) # TODO: Are sanity checks (such as asnumber>=0) required? return arguments @@ -1857,7 +1857,6 @@ class WriteTracker(object): :return: true if no remaining data to send """ # We assume there is a msg_out to send and socket is writable. - # print "going to send", repr(self.msg_out) self.timer.snapshot() bytes_sent = self.socket.send(self.msg_out) # Forget the part of message that was sent. @@ -2153,7 +2152,7 @@ def threaded_job(arguments): for t in thread_args: thread.start_new_thread(job, (t, rpcqueue, storage)) except Exception: - print "Error: unable to start thread." 
+ print("Error: unable to start thread.") raise SystemExit(2) if arguments.usepeerip: diff --git a/tools/mdsal_benchmark/dsbenchmark.py b/tools/mdsal_benchmark/dsbenchmark.py index 15d6607f39..5e7b553c0c 100755 --- a/tools/mdsal_benchmark/dsbenchmark.py +++ b/tools/mdsal_benchmark/dsbenchmark.py @@ -65,7 +65,7 @@ def send_clear_request(): url = BASE_URL + "operations/dsbenchmark:cleanup-store" r = requests.post(url, stream=False, auth=('admin', 'admin')) - print r.status_code + print(r.status_code) def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx): @@ -101,7 +101,7 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner if r.status_code == 200: result = dict(result.items() + json.loads(r.content)['output'].items()) else: - print 'Error %s, %s' % (r.status_code, r.content) + print('Error %s, %s' % (r.status_code, r.content)) return result @@ -115,8 +115,8 @@ def print_results(run_type, idx, res): test run :return: None """ - print '%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % \ - (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']) + print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % + (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError'])) def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx): @@ -138,8 +138,8 @@ def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, ou total_build_time = 0.0 total_exec_time = 0.0 - print "Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore, - print "Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx + print("Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,) + print("Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx) for idx in range(warmup_runs): res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx) print_results('WARMUP', idx, res) @@ -168,8 +168,8 @@ def store_result(values, tx_type, operation, data_fmt, datastore, :param value: The (measured) value :return: none """ - plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' + - str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name) + plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' + + str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name) values[plot_key] = value @@ -228,35 +228,35 @@ if __name__ == "__main__": f = open('test.csv', 'wt') try: start_time = time.time() - print "Start time: %f " % start_time + print("Start time: %f " % (start_time)) writer = csv.writer(f) # Determine the impact of transaction type, data format and data structure on performance. 
# Iterate over all transaction types, data formats, operation types, and different # list-of-lists layouts; always use a single operation in each transaction - print '\n#######################################' - print 'Tx type, data format & data structure' - print '#######################################' + print('\n#######################################') + print('Tx type, data format & data structure') + print('#######################################') for tx_type in TX_TYPES: - print '***************************************' - print 'Transaction Type: %s' % tx_type - print '***************************************' + print('***************************************') + print('Transaction Type: %s' % tx_type) + print('***************************************') writer.writerow((('%s:' % tx_type), '', '')) for fmt in DATA_FORMATS: - print '---------------------------------------' - print 'Data format: %s' % fmt - print '---------------------------------------' + print('---------------------------------------') + print('Data format: %s' % fmt) + print('---------------------------------------') writer.writerow(('', ('%s:' % fmt), '')) for datastore in DATASTORES: print - print 'Data store: %s' % datastore + print('Data store: %s' % datastore) print for oper in OPERATIONS: - print 'Operation: %s' % oper + print('Operation: %s' % oper) writer.writerow(('', '', '%s:' % oper)) for elem in INNER_ELEMENTS: @@ -273,28 +273,28 @@ if __name__ == "__main__": # Determine the impact of number of writes per transaction on performance. # Iterate over all transaction types, data formats, operation types, and # operations-per-transaction; always use a list of lists where the inner list has one parameter - print '\n#######################################' - print 'Puts per tx' - print '#######################################' + print('\n#######################################') + print('Puts per tx') + print('#######################################') for tx_type in TX_TYPES: - print '***************************************' - print 'Transaction Type: %s' % tx_type - print '***************************************' + print('***************************************') + print('Transaction Type: %s' % tx_type) + print('***************************************') writer.writerow((('%s:' % tx_type), '', '')) for fmt in DATA_FORMATS: - print '---------------------------------------' - print 'Data format: %s' % fmt - print '---------------------------------------' + print('---------------------------------------') + print('Data format: %s' % fmt) + print('---------------------------------------') writer.writerow(('', ('%s:' % fmt), '')) for datastore in DATASTORES: print - print 'Data store: %s' % datastore + print('Data store: %s' % datastore) print for oper in OPERATIONS: - print 'Operation: %s' % oper + print('Operation: %s' % oper) writer.writerow(('', '', '%s:' % oper)) for wtx in OPS_PER_TX: @@ -311,8 +311,8 @@ if __name__ == "__main__": write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER) end_time = time.time() - print "End time: %f " % end_time - print "Total execution time: %f" % (end_time - start_time) + print("End time: %f " % (end_time)) + print("Total execution time: %f" % ((end_time - start_time))) finally: f.close() diff --git a/tools/mdsal_benchmark/ntfbenchmark.py b/tools/mdsal_benchmark/ntfbenchmark.py index 1fd649b692..2537c649c1 100755 --- a/tools/mdsal_benchmark/ntfbenchmark.py +++ b/tools/mdsal_benchmark/ntfbenchmark.py @@ -53,7 +53,7 @@ def send_test_request(producer_type, producers, 
listeners, payload_size, iterati if r.status_code == 200: result = dict(result.items() + json.loads(r.content)['output'].items()) else: - print 'Error %s, %s' % (r.status_code, r.content) + print('Error %s, %s' % (r.status_code, r.content)) return result @@ -67,10 +67,10 @@ def print_results(run_type, idx, res): test run :return: None """ - print '%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' % \ + print('%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' % (run_type, idx, res[u'producer-ok'], res[u'producer-error'], res[u'listener-ok'], res[u'producer-rate'], - res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time']) + res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time'])) def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations): @@ -145,15 +145,15 @@ if __name__ == "__main__": for lis in args.listeners: exec_time, prate, lrate = run_test(args.warm, args.run, args.ptype, prod, lis, args.payload, args.iterations) - print 'Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate) + print('Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate)) lrate_row.append(lrate) prate_row.append(prate) lrate_matrix.append(lrate_row) prate_matrix.append(prate_row) - print lrate_matrix - print prate_matrix + print(lrate_matrix) + print(prate_matrix) # writer.writerow((('%s:' % args.ptype), '', '', '')) # writer.writerow(('', exec_time, prate, lrate)) diff --git a/tools/mdsal_benchmark/rpcbenchmark.py b/tools/mdsal_benchmark/rpcbenchmark.py index 5e091fd02e..9c32ae48d9 100755 --- a/tools/mdsal_benchmark/rpcbenchmark.py +++ b/tools/mdsal_benchmark/rpcbenchmark.py @@ -53,7 +53,7 @@ def send_test_request(operation, clients, servers, payload_size, iterations): if r.status_code == 200: result = dict(result.items() + json.loads(r.content)['output'].items()) else: - print 'Error %s, %s' % (r.status_code, r.content) + print('Error %s, %s' % (r.status_code, r.content)) return result @@ -67,9 +67,9 @@ def print_results(run_type, idx, res): test run :return: None """ - print '%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' % \ + print('%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' % (run_type, idx, - res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time']) + res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time'])) def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size, iterations): @@ -150,7 +150,7 @@ if __name__ == "__main__": run_test(args.warm, args.run, args.operation, client, svr, args.payload, args.iterations) rate_row.append(rate) rate_matrix.append(rate_row) - print rate_matrix + print(rate_matrix) writer.writerow(('RPC Rates:', '')) writer.writerows(rate_matrix) diff --git a/tools/netconf_tools/configurer.py b/tools/netconf_tools/configurer.py index 90b9ca862b..d3c10bc6e5 100644 --- a/tools/netconf_tools/configurer.py +++ b/tools/netconf_tools/configurer.py @@ -129,7 +129,7 @@ def main(): def handle_sigint(received_signal, frame): # This is a closure as it refers to the counter. 
"""Upon SIGINT, print counter contents and exit gracefully.""" signal.signal(signal.SIGINT, signal.SIG_DFL) - print sorted_repr(counter) + print(sorted_repr(counter)) sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) diff --git a/tools/netconf_tools/getter.py b/tools/netconf_tools/getter.py index 4596277807..3655800410 100644 --- a/tools/netconf_tools/getter.py +++ b/tools/netconf_tools/getter.py @@ -171,10 +171,10 @@ while request_count > 0: if len(responses) > 0: result = responses.popleft() if result[0] is None: - print "ERROR|" + result[1] + "|" + print("ERROR|" + result[1] + "|") break runtime = "%5.3f|%5.3f|%5.3f" % result[1] - print "%03d|%s|%s|" % (result[0], runtime, result[2]) + print("%03d|%s|%s|" % ((result[0], runtime, result[2]))) request_count -= 1 continue time.sleep(args.refresh) diff --git a/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py b/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py index 63c1a8e7f4..fb7e758705 100755 --- a/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py +++ b/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py @@ -95,10 +95,10 @@ class MappingRPCBlaster(object): self.start_rloc = netaddr.IPAddress(start_rloc) self.nmappings = nmappings if v == "Li" or v == "li": - print "Using the Lithium RPC URL" + print("Using the Lithium RPC URL") rpc_url = self.RPC_URL_LI else: - print "Using the Beryllium and later RPC URL" + print("Using the Beryllium and later RPC URL") rpc_url = self.RPC_URL_BE self.post_url_template = 'http://' + self.host + ':' \ @@ -205,4 +205,4 @@ if __name__ == "__main__": elif in_args.mode == "get": mapping_rpc_blaster.get_n_mappings() else: - print "Unsupported mode:", in_args.mode + print("Unsupported mode:", in_args.mode) diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py index 1dd17f273e..188f6437c5 100755 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py @@ -56,10 +56,10 @@ if __name__ == "__main__": sts = cleanup_config_fl(in_args.host, in_args.port) exp = 204 else: - print 'Unknown controller type' + print('Unknown controller type') sys.exit(-1) if sts != exp: - print 'Failed to delete nodes in the config space, code %d' % sts + print('Failed to delete nodes in the config space, code %d' % sts) else: - print 'Nodes in config space deleted.' 
+ print('Nodes in config space deleted.') diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py index 12ee2e82fa..154a89f3ed 100644 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py @@ -15,13 +15,13 @@ for line in log.splitlines(): res = pat_rate.search(line) if res is not None: rate.append(res.groups('rate1')[0]) -print rate +print(rate) for line in log.splitlines(): res = pat_time.search(line) if res is not None: time.append(res.groups('time1')[0]) -print time +print(time) text_file = open("rates.csv", "w") text_file.write('Add,Delete\n') diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py index b0ddcda3de..57bb31ddaf 100755 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py @@ -43,21 +43,21 @@ def wait_for_stats(crawler, exp_found, timeout, delay): :return: None """ total_delay = 0 - print 'Waiting for stats to catch up:' + print('Waiting for stats to catch up:') with Timer() as t: while True: crawler.crawl_inventory() - print ' %d, %d' % (crawler.reported_flows, crawler.found_flows) + print(' %d, %d' % (crawler.reported_flows, crawler.found_flows)) if crawler.found_flows == exp_found or total_delay > timeout: break total_delay += delay time.sleep(delay) if total_delay < timeout: - print 'Stats collected in %d seconds.' % t.secs + print('Stats collected in %d seconds.' % t.secs) else: - print 'Stats collection did not finish in %d seconds. Aborting...' % total_delay + print('Stats collection did not finish in %d seconds. Aborting...' % total_delay) if __name__ == "__main__": @@ -131,16 +131,16 @@ if __name__ == "__main__": reported = ic.reported_flows found = ic.found_flows - print 'Baseline:' - print ' Reported flows: %d' % reported - print ' Found flows: %d' % found + print('Baseline:') + print(' Reported flows: %d' % reported) + print(' Found flows: %d' % found) # Run through add cycles, where threads are started in # each cycle and flows are added from each thread fct.add_blaster() - print '\n*** Total flows added: %d' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows added: %d' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) # Wait for stats to catch up wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay) @@ -149,17 +149,17 @@ if __name__ == "__main__": # in each cycle and flows previously added in an add cycle are # deleted in each thread if in_args.bulk_delete: - print '\nDeleting all flows in bulk:' + print('\nDeleting all flows in bulk:') sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth) if sts != 200: - print ' Failed to delete flows, code %d' % sts + print(' Failed to delete flows, code %d' % sts) else: - print ' All flows deleted.' 
+ print(' All flows deleted.')
 else:
- print '\nDeleting flows one by one\n ',
+ print('\nDeleting flows one by one\n ', end='')
 fct.delete_blaster()
- print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())

 # Wait for stats to catch up back to baseline
 wait_for_stats(ic, found, in_args.timeout, in_args.delay)
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
index 6fba121699..a07748150c 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
@@ -277,7 +277,6 @@ class FlowConfigBlaster(object):
 hosts = self.host.split(",")
 host = hosts[flow_count % len(hosts)]
 flow_url = self.assemble_post_url(host, node)
- # print flow_url

 if not self.auth:
 r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
@@ -306,7 +305,6 @@ class FlowConfigBlaster(object):
 fmod = dict(self.flow_mode_template)
 fmod['flow'] = flow_list
 flow_data = json.dumps(fmod)
- # print flow_data
 return flow_data

 def add_flows(self, start_flow_id, tid):
@@ -330,7 +328,7 @@ class FlowConfigBlaster(object):
 n_nodes = self.get_num_nodes(s)

 with self.print_lock:
- print ' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print(' Thread %d:\n Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))

 nflows = 0
 nb_actions = []
@@ -359,13 +357,13 @@ class FlowConfigBlaster(object):
 ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)

 with self.print_lock:
- print '\n Thread %d results (ADD): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests}, {Flows}): ',
- print rqst_stats,
- print flow_stats
+ print('\n Thread %d results (ADD): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests}, {Flows}): ', end='')
+ print(rqst_stats, end=' ')
+ print(flow_stats)
 self.threads_done += 1

 s.close()
@@ -389,7 +387,6 @@ class FlowConfigBlaster(object):
 hosts = self.host.split(",")
 host = hosts[flow_count % len(hosts)]
 flow_url = self.del_url_template % (host, node, flow_id)
- # print flow_url

 if not self.auth:
 r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
@@ -415,7 +412,7 @@ class FlowConfigBlaster(object):
 n_nodes = self.get_num_nodes(s)

 with self.print_lock:
- print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+ print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))

 with Timer() as t:
 for flow in range(self.nflows):
@@ -429,12 +426,12 @@ class FlowConfigBlaster(object):
 ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)

 with self.print_lock:
- print '\n Thread %d results (DELETE): ' % tid
- print ' Elapsed time: %.2fs,' % t.secs
- print ' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps)
- print ' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps)
- print ' Stats ({Requests})',
- print rqst_stats
+ print('\n Thread %d results (DELETE): ' % tid)
+ print(' Elapsed time: %.2fs,' % t.secs)
+ print(' Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
+ print(' Flows/s: %.2f OK, %.2f Total' % (ok_fps, total_fps))
+ print(' Stats ({Requests})', end=' ')
+ print(rqst_stats)
 self.threads_done += 1

 s.close()
@@ -457,7 +454,7 @@ class FlowConfigBlaster(object):
 for c in range(self.ncycles):
 self.stats = self.FcbStats()
 with self.print_lock:
- print '\nCycle %d:' % c
+ print('\nCycle %d:' % c)

 threads = []
 for i in range(self.nthreads):
@@ -471,20 +468,20 @@ class FlowConfigBlaster(object):
 thread.join()

 with self.print_lock:
- print '\n*** Test summary:'
- print ' Elapsed time: %.2fs' % t.secs
- print ' Peak requests/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
- print ' Peak flows/s: %.2f OK, %.2f Total' % (
- self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
- print ' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
+ print('\n*** Test summary:')
+ print(' Elapsed time: %.2fs' % t.secs)
+ print(' Peak requests/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
+ print(' Peak flows/s: %.2f OK, %.2f Total' % (
+ self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
+ print(' Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
 self.stats.get_ok_rqsts() / t.secs,
 self.stats.get_total_rqsts() / t.secs,
- (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate())
- print ' Avg. flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
- self.stats.get_ok_flows() / t.secs,
- self.stats.get_total_flows() / t.secs,
- (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())
+ (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
+ print(' Avg. 
flows/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % ( + self.stats.get_ok_flows() / t.secs, + self.stats.get_total_flows() / t.secs, + (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate())) self.total_ok_flows += self.stats.get_ok_flows() self.total_ok_rqsts += self.stats.get_ok_rqsts() @@ -518,10 +515,10 @@ def get_json_from_file(filename): keys = ft['flow'][0].keys() if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys): if u'ipv4-destination' in ft[u'flow'][0]['match'].keys(): - print 'File "%s" ok to use as flow template' % filename + print('File "%s" ok to use as flow template' % filename) return ft except ValueError: - print 'JSON parsing of file %s failed' % filename + print('JSON parsing of file %s failed' % filename) pass return None @@ -648,16 +645,16 @@ if __name__ == "__main__": # are added from each thread fct.add_blaster() - print '\n*** Total flows added: %s' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows added: %s' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) if in_args.delay > 0: - print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay + print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay) time.sleep(in_args.delay) # Run through , where are started in each cycle and # previously added in an add cycle are deleted in each thread if in_args.delete: fct.delete_blaster() - print '\n*** Total flows deleted: %s' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows deleted: %s' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py index 1fec13191a..f24d5ab062 100755 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py @@ -78,7 +78,6 @@ class FlowConfigBulkBlaster(flow_config_blaster.FlowConfigBlaster): json_input = {'input': {'bulk-flow-ds-item': flow_list}} flow_data = json.dumps(json_input) - # print flow_data return flow_data @@ -111,16 +110,16 @@ if __name__ == "__main__": # are added from each thread fcbb.add_blaster() - print '\n*** Total flows added: %s' % fcbb.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts() + print('\n*** Total flows added: %s' % fcbb.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts()) if in_args.delay > 0: - print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay + print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay) time.sleep(in_args.delay) # Run through , where are started in each cycle and # previously added in an add cycle are deleted in each thread if in_args.delete: fcbb.delete_blaster() - print '\n*** Total flows deleted: %s' % fcbb.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts() + print('\n*** Total flows deleted: %s' % fcbb.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fcbb.get_ok_rqsts()) diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py index b54cf5cce5..cbed315eca 100755 
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py @@ -103,9 +103,9 @@ class FlowConfigBlasterFLE(FlowConfigBlaster): clear_url = 'http://' + self.host + ":" + self.port + '/wm/staticflowpusher/clear/all/json' r = requests.get(clear_url) if r.status_code == 200: - print "All flows cleared before the test" + print("All flows cleared before the test") else: - print "Failed to clear flows from the controller, your results may vary" + print("Failed to clear flows from the controller, your results may vary") if __name__ == "__main__": @@ -147,16 +147,16 @@ if __name__ == "__main__": # Run through , where are started in each cycle and are added from each thread fct.add_blaster() - print '\n*** Total flows added: %s' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows added: %s' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) if in_args.delay > 0: - print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay + print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay) time.sleep(in_args.delay) # Run through , where are started in each cycle and previously added in an add cycle are # deleted in each thread if in_args.delete: fct.delete_blaster() - print '\n*** Total flows deleted: %s' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows deleted: %s' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py index 5bad661eb1..9dacaec456 100755 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py @@ -113,9 +113,9 @@ if __name__ == "__main__": reported = ic.reported_flows found = ic.found_flows - print 'Baseline:' - print ' Reported nodes: %d' % reported - print ' Found nodes: %d' % found + print('Baseline:') + print(' Reported nodes: %d' % reported) + print(' Found nodes: %d' % found) stats = [] stats.append((time.time(), ic.nodes, ic.reported_flows, ic.found_flows)) @@ -123,33 +123,33 @@ if __name__ == "__main__": # each cycle and flows are added from each thread fct.add_blaster() - print '\n*** Total flows added: %d' % fct.get_ok_flows() - print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts() + print('\n*** Total flows added: %d' % fct.get_ok_flows()) + print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()) # monitor stats and save results in the list for stat_item in monitor_stats(ic, in_args.config_monitor, in_args.monitor_period): - print stat_item + print(stat_item) stats.append(stat_item) # Run through delete cycles, where threads are started # in each cycle and flows previously added in an add cycle are # deleted in each thread if in_args.bulk_delete: - print '\nDeleting all flows in bulk:' + print('\nDeleting all flows in bulk:') sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth) if sts != 200: - print ' Failed to delete flows, code %d' % sts + print(' Failed to delete flows, code %d' % sts) else: - print ' All flows deleted.' 
+ print(' All flows deleted.')
 else:
- print '\nDeleting flows one by one\n ',
+ print('\nDeleting flows one by one\n ', end='')
 fct.delete_blaster()
- print '\n*** Total flows deleted: %d' % fct.get_ok_flows()
- print ' HTTP[OK] results: %d\n' % fct.get_ok_rqsts()
+ print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
+ print(' HTTP[OK] results: %d\n' % fct.get_ok_rqsts())

 # monitor stats and append to the list
 for stat_item in monitor_stats(ic, in_args.deconfig_monitor, in_args.monitor_period):
- print stat_item
+ print(stat_item)
 stats.append(stat_item)

 # if requested, write collected data into the file
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
index f24c0f724d..65cb5d2da8 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
@@ -34,7 +34,7 @@ class InventoryCrawler(object):
 """
 self.found_flows += len(flows)
 if self.plevel > 1:
- print ' Flows found: %d\n' % len(flows)
+ print(' Flows found: %d\n' % len(flows))
 if self.plevel > 2:
 for f in flows:
 s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
@@ -45,8 +45,8 @@ class InventoryCrawler(object):
 s = s.rstrip('}')
 s = s.replace('\n', '\n ')
 s = s.lstrip('\n')
- print " Flow %s:" % f['id']
- print s
+ print(" Flow %s:" % (f['id']))
+ print(s)

 def crawl_table(self, table):
 """
@@ -60,14 +60,14 @@ class InventoryCrawler(object):
 if active_flows > 0:
 self.reported_flows += active_flows
 if self.plevel > 1:
- print ' Table %s:' % table['id']
+ print(' Table %s:' % table['id'])
 s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
 s = s.replace('{\n', '')
 s = s.replace('}', '')
- print s
+ print(s)
 except KeyError:
 if self.plevel > 1:
- print " Stats for Table '%s' not available." % table['id']
+ print(" Stats for Table '%s' not available." % (table['id']))
 self.table_stats_unavailable += 1
 pass
@@ -85,14 +85,14 @@ class InventoryCrawler(object):
 self.nodes += 1

 if self.plevel > 1:
- print "\nNode '%s':" % (node['id'])
+ print("\nNode '%s':" % (node['id']))
 elif self.plevel > 0:
- print "%s" % (node['id'])
+ print("%s" % (node['id']))

 try:
 tables = node['flow-node-inventory:table']
 if self.plevel > 1:
- print ' Tables: %d' % len(tables)
+ print(' Tables: %d' % len(tables))

 for t in tables:
 self.crawl_table(t)
@@ -102,7 +102,7 @@ class InventoryCrawler(object):

 except KeyError:
 if self.plevel > 1:
- print ' Data for tables not available.'
+ print(' Data for tables not available.')

 def crawl_inventory(self):
 """
@@ -134,12 +134,12 @@ class InventoryCrawler(object):
 try:
 self.crawl_node(sinv[n])
 except:
- print 'Can not crawl %s' % sinv[n]['id']
+ print('Can not crawl %s' % sinv[n]['id'])

 except KeyError:
- print 'Could not retrieve inventory, response not in JSON format'
+ print('Could not retrieve inventory, response not in JSON format')
 else:
- print 'Could not retrieve inventory, HTTP error %d' % r.status_code
+ print('Could not retrieve inventory, HTTP error %d' % r.status_code)

 s.close()
@@ -170,16 +170,16 @@ if __name__ == "__main__":
 ic = InventoryCrawler(in_args.host, in_args.port, in_args.plevel, in_args.datastore, in_args.auth,
 in_args.debug)

- print "Crawling '%s'" % ic.url
+ print("Crawling '%s'" % (ic.url))
 ic.crawl_inventory()

- print '\nTotals:'
- print ' Nodes: %d' % ic.nodes
- print ' Reported flows: %d' % ic.reported_flows
- print ' Found flows: %d' % ic.found_flows
+ print('\nTotals:')
+ print(' Nodes: %d' % ic.nodes)
+ print(' Reported flows: %d' % ic.reported_flows)
+ print(' Found flows: %d' % ic.found_flows)

 if in_args.debug:
 n_missing = len(ic.table_stats_fails)
 if n_missing > 0:
- print '\nMissing table stats (%d nodes):' % n_missing
- print "%s\n" % ", ".join([x for x in ic.table_stats_fails])
+ print('\nMissing table stats (%d nodes):' % n_missing)
+ print("%s\n" % (", ".join([x for x in ic.table_stats_fails])))
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
index 48d279f2ad..716c644c6a 100644
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
@@ -71,7 +71,7 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
 results = {}

 with print_lock:
- print 'Thread %d: Getting %s' % (tnum, url)
+ print('Thread %d: Getting %s' % (tnum, url))

 s = requests.Session()
 with Timer() as t:
@@ -95,12 +95,12 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
 total_mb_rate.increment(mrate)

 with print_lock:
- print '\nThread %d: ' % tnum
- print ' Elapsed time: %.2f,' % t.secs
- print ' Requests: %d, Requests/sec: %.2f' % (total, rate)
- print ' Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate)
- print ' Results: ',
- print results
+ print('\nThread %d: ' % tnum)
+ print(' Elapsed time: %.2f,' % t.secs)
+ print(' Requests: %d, Requests/sec: %.2f' % (total, rate))
+ print(' Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate))
+ print(' Results: ', end='')
+ print(results)

 with cond:
 cond.notifyAll()
@@ -139,10 +139,10 @@ if __name__ == "__main__":
 cond.wait()
 finished = finished + 1

- print '\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
- total_req_rate.value)
- print 'Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
- total_mb_rate.value)
+ print('\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
+ total_req_rate.value))
+ print('Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
+ total_mb_rate.value))

 # get_inventory(url, getheaders, int(in_args.requests))
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py
index 87a19ebc85..c1d48d2476 100755
--- 
a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py @@ -67,7 +67,7 @@ def read(hosts, port, auth, datastore, print_lock, cycles, results_queue): stats[r.status_code] = stats.get(r.status_code, 0) + 1 with print_lock: - print ' ', threading.current_thread().name, 'results:', stats + print(' %s results: %s' % (threading.current_thread().name, stats)) results_queue.put(stats) @@ -118,6 +118,6 @@ if __name__ == "__main__": # Aggregate the results stats = functools.reduce(operator.add, map(collections.Counter, results.queue)) - print '\n*** Test summary:' - print ' Elapsed time: %.2fs' % t.secs - print ' HTTP[OK] results: %d\n' % stats[200] + print('\n*** Test summary:') + print(' Elapsed time: %.2fs' % t.secs) + print(' HTTP[OK] results: %d\n' % stats[200]) diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py index 214e8aeaec..0298c0a841 100644 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py @@ -224,7 +224,7 @@ def main(*argv): parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""') in_args = parser.parse_args(*argv) - print in_args + print(in_args) # get device ids base_dev_ids = get_device_ids(controller=in_args.host) @@ -236,9 +236,9 @@ def main(*argv): base_num_flows = len(base_flow_ids) - print "BASELINE:" - print " devices:", len(base_dev_ids) - print " flows :", base_num_flows + print("BASELINE:") + print(" devices:", len(base_dev_ids)) + print(" flows :", base_num_flows) # lets fill the queue for workers nflows = 0 @@ -287,25 +287,25 @@ def main(*argv): else: result[k] += v - print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result + print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result) add_details = {"duration": tmr.secs, "flows": len(flow_details)} # lets print some stats - print "\n\nStats monitoring ..." + print("\n\nStats monitoring ...") rounds = 200 with Timer() as t: for i in range(rounds): reported_flows = len(get_flow_ids(controller=in_args.host)) expected_flows = base_num_flows + in_args.flows - print "Reported Flows: %d/%d" % (reported_flows, expected_flows) + print("Reported Flows: %d/%d" % ((reported_flows, expected_flows))) if reported_flows >= expected_flows: break time.sleep(1) if i < rounds: - print "... monitoring finished in +%d seconds\n\n" % t.secs + print("... monitoring finished in +%d seconds\n\n" % (t.secs)) else: - print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs) + print("... 
monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs))) if in_args.no_delete: return @@ -313,7 +313,7 @@ def main(*argv): # sleep in between time.sleep(in_args.timeout) - print "Flows to be removed: %d" % len(flow_details) + print("Flows to be removed: %d" % (len(flow_details))) # lets fill the queue for workers sendqueue = Queue.Queue() for fld in flow_details: @@ -356,37 +356,30 @@ def main(*argv): else: result[k] += v - print "Removed", len(flow_details), "flows in", tmr.secs, "seconds", result + print("Removed", len(flow_details), "flows in", tmr.secs, "seconds", result) del_details = {"duration": tmr.secs, "flows": len(flow_details)} -# # lets print some stats -# print "\n\nSome stats monitoring ...." -# for i in range(100): -# print get_flow_simple_stats(controller=in_args.host) -# time.sleep(5) -# print "... monitoring finished\n\n" - # lets print some stats - print "\n\nStats monitoring ..." + print("\n\nStats monitoring ...") rounds = 200 with Timer() as t: for i in range(rounds): reported_flows = len(get_flow_ids(controller=in_args.host)) expected_flows = base_num_flows - print "Reported Flows: %d/%d" % (reported_flows, expected_flows) + print("Reported Flows: %d/%d" % ((reported_flows, expected_flows))) if reported_flows <= expected_flows: break time.sleep(1) if i < rounds: - print "... monitoring finished in +%d seconds\n\n" % t.secs + print("... monitoring finished in +%d seconds\n\n" % (t.secs)) else: - print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs) + print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs))) if in_args.outfile != "": addrate = add_details['flows'] / add_details['duration'] delrate = del_details['flows'] / del_details['duration'] - print "addrate", addrate - print "delrate", delrate + print("addrate", addrate) + print("delrate", delrate) with open(in_args.outfile, "wt") as fd: fd.write("AddRate,DeleteRate\n") diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py index 86d860f538..06654b8ff6 100644 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py @@ -217,14 +217,10 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]): if rsp.status_code != 200: return flows = json.loads(rsp.content)['flows'] - # print "Flows", flows - # print "Details", flow_details for dev_id, ip in flow_details: - # print "looking for details", dev_id, ip for f in flows: # lets identify if it is our flow if f["treatment"]["instructions"][0]["type"] != "DROP": - # print "NOT DROP" continue if f["deviceId"] == dev_id: if "ip" in f["selector"]["criteria"][0]: @@ -233,9 +229,7 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]): item_idx = 1 else: continue - # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip)) if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)): - # print dev_id, ip, f yield dev_id, f["id"] break @@ -246,13 +240,10 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181): if rsp.status_code != 200: return flows = json.loads(rsp.content)['flows'] - # print "Flows", flows - # print "Details", flow_details for f in flows: # lets identify if it is our flow if f["treatment"]["instructions"][0]["type"] != "NOACTION": - # print "NOT DROP" continue if "ip" in 
f["selector"]["criteria"][0]: item_idx = 0 @@ -260,10 +251,8 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181): item_idx = 1 else: continue - # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip)) ipstr = f["selector"]["criteria"][item_idx]["ip"] if '10.' in ipstr and '/32' in ipstr: - # print dev_id, ip, f yield (f["deviceId"], f["id"]) @@ -278,7 +267,7 @@ def main(*argv): help='Port on which onos\'s RESTCONF is listening (default is 8181)') in_args = parser.parse_args(*argv) - print in_args + print(in_args) # get device ids base_dev_ids = get_device_ids(controller=in_args.host) @@ -288,13 +277,13 @@ def main(*argv): # prepare func preparefnc = _prepare_post # noqa # FIXME: This script seems to be unfinished! - print "BASELINE:" - print " devices:", len(base_dev_ids) - print " flows :", len(base_flow_ids) + print("BASELINE:") + print(" devices:", len(base_dev_ids)) + print(" flows :", len(base_flow_ids)) # lets print some stats - print "\n\nSome stats monitoring ...." - print get_flow_simple_stats(controller=in_args.host) + print("\n\nSome stats monitoring ....") + print(get_flow_simple_stats(controller=in_args.host)) if __name__ == "__main__": diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py index 0e00655dfe..c9e0d3f7b1 100644 --- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py +++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py @@ -231,14 +231,10 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]): if rsp.status_code != 200: return flows = json.loads(rsp.content)['flows'] - # print "Flows", flows - # print "Details", flow_details for dev_id, ip in flow_details: - # print "looking for details", dev_id, ip for f in flows: # lets identify if it is our flow if f["treatment"]["instructions"][0]["type"] != "DROP": - # print "NOT DROP" continue if f["deviceId"] == dev_id: if "ip" in f["selector"]["criteria"][0]: @@ -247,9 +243,7 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]): item_idx = 1 else: continue - # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip)) if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)): - # print dev_id, ip, f yield dev_id, f["id"] break @@ -260,13 +254,10 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181): if rsp.status_code != 200: return flows = json.loads(rsp.content)['flows'] - # print "Flows", flows - # print "Details", flow_details for f in flows: # lets identify if it is our flow if f["treatment"]["instructions"][0]["type"] != "NOACTION": - # print "NOT DROP" continue if "ip" in f["selector"]["criteria"][0]: item_idx = 0 @@ -274,10 +265,8 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181): item_idx = 1 else: continue - # print "Comparing", '%s/32' % str(netaddr.IPAddress(ip)) ipstr = f["selector"]["criteria"][item_idx]["ip"] if '10.' 
in ipstr and '/32' in ipstr: - # print dev_id, ip, f yield (f["deviceId"], f["id"]) @@ -307,7 +296,7 @@ def main(*argv): parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""') in_args = parser.parse_args(*argv) - print in_args + print(in_args) # get device ids base_dev_ids = get_device_ids(controller=in_args.host) @@ -319,9 +308,9 @@ def main(*argv): base_num_flows = len(base_flow_ids) - print "BASELINE:" - print " devices:", len(base_dev_ids) - print " flows :", base_num_flows + print("BASELINE:") + print(" devices:", len(base_dev_ids)) + print(" flows :", base_num_flows) # lets fill the queue for workers nflows = 0 @@ -369,16 +358,16 @@ def main(*argv): else: result[k] += v - print "Added", in_args.flows, "flows in", tmr.secs, "seconds", result + print("Added", in_args.flows, "flows in", tmr.secs, "seconds", result) add_details = {"duration": tmr.secs, "flows": len(flow_details)} # lets print some stats - print "\n\nStats monitoring ..." + print("\n\nStats monitoring ...") rounds = 200 with Timer() as t: for i in range(rounds): flow_stats = get_flow_simple_stats(controller=in_args.host) - print flow_stats + print(flow_stats) try: pending_adds = int(flow_stats[u'PENDING_ADD']) # noqa # FIXME: Print this somewhere. except KeyError: @@ -386,9 +375,9 @@ def main(*argv): time.sleep(1) if i < rounds: - print "... monitoring finished in +%d seconds\n\n" % t.secs + print("... monitoring finished in +%d seconds\n\n" % (t.secs)) else: - print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs) + print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs))) if in_args.no_delete: return @@ -402,7 +391,7 @@ def main(*argv): # for a in get_flow_device_pairs(controller=in_args.host, flow_details=flow_details): for a in get_flow_to_remove(controller=in_args.host): flows_remove_details.append(a) - print "Flows to be removed: ", len(flows_remove_details) + print("Flows to be removed: ", len(flows_remove_details)) # lets fill the queue for workers nflows = 0 @@ -448,22 +437,15 @@ def main(*argv): else: result[k] += v - print "Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result + print("Removed", len(flows_remove_details), "flows in", tmr.secs, "seconds", result) del_details = {"duration": tmr.secs, "flows": len(flows_remove_details)} -# # lets print some stats -# print "\n\nSome stats monitoring ...." -# for i in range(100): -# print get_flow_simple_stats(controller=in_args.host) -# time.sleep(5) -# print "... monitoring finished\n\n" - # lets print some stats - print "\n\nStats monitoring ..." + print("\n\nStats monitoring ...") rounds = 200 with Timer() as t: for i in range(rounds): flow_stats = get_flow_simple_stats(controller=in_args.host) - print flow_stats + print(flow_stats) try: pending_rems = int(flow_stats[u'PENDING_REMOVE']) # noqa # FIXME: Print this somewhere. except KeyError: @@ -471,15 +453,15 @@ def main(*argv): time.sleep(1) if i < rounds: - print "... monitoring finished in +%d seconds\n\n" % t.secs + print("... monitoring finished in +%d seconds\n\n" % (t.secs)) else: - print "... monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs) + print("... 
monitoring aborted after %d rounds, elapsed time %d\n\n" % (rounds, t.secs))

 if in_args.outfile != "":
 addrate = add_details['flows'] / add_details['duration']
 delrate = del_details['flows'] / del_details['duration']
- print "addrate", addrate
- print "delrate", delrate
+ print("addrate", addrate)
+ print("delrate", delrate)

 with open(in_args.outfile, "wt") as fd:
 fd.write("AddRate,DeleteRate\n")
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
index 8d503a30e4..bde0aa23ed 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
@@ -14,4 +14,4 @@ if __name__ == "__main__":
 data = sys.stdin.readlines()
 payload = json.loads(data.pop(0))
 s = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
- print '%s\n\n' % s
+ print('%s\n\n' % s)
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py
index e6fcf50879..1b2a28025c 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py
@@ -104,7 +104,7 @@ class ShardPerformanceTester(object):
 s = requests.Session()

 with self.print_lock:
- print ' Thread %d: Performing %d requests' % (tid, self.requests)
+ print(' Thread %d: Performing %d requests' % (tid, self.requests))

 with Timer() as t:
 for r in range(self.requests):
@@ -118,11 +118,11 @@ class ShardPerformanceTester(object):
 total_rate = sum(res.values()) / t.secs

 with self.print_lock:
- print 'Thread %d done:' % tid
- print ' Time: %.2f,' % t.secs
- print ' Success rate: %.2f, Total rate: %.2f' % (ok_rate, total_rate)
- print ' Per-thread stats: ',
- print res
+ print('Thread %d done:' % tid)
+ print(' Time: %.2f,' % t.secs)
+ print(' Success rate: %.2f, Total rate: %.2f' % (ok_rate, total_rate))
+ print(' Per-thread stats: ', end='')
+ print(res)
 self.threads_done += 1

 self.total_rate += total_rate
@@ -160,17 +160,17 @@ class ShardPerformanceTester(object):
 self.cond.wait()

 # Print summary results. Each worker prints its owns results too.
- print '\nSummary Results:'
- print ' Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs)
- print ' Requests/sec (measured): %.2f' % ((self.threads * self.requests) / t.secs)
- print ' Time: %.2f' % t.secs
+ print('\nSummary Results:')
+ print(' Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs))
+ print(' Requests/sec (measured): %.2f' % ((self.threads * self.requests) / t.secs))
+ print(' Time: %.2f' % t.secs)
 self.threads_done = 0

 if self.plevel > 0:
- print ' Per URL Counts: ',
+ print(' Per URL Counts: ', end='')
 for i in range(len(urls)):
- print '%d' % self.url_counters[i].value,
- print '\n'
+ print('%d' % self.url_counters[i].value, end=' ')
+ print('\n')


 class TestUrlGenerator(object):
@@ -199,7 +199,7 @@ class TestUrlGenerator(object):
 :param data: Bulk resource data (JSON) from which to generate the URLs
 :return: List of generated Resources
 """
- print "Abstract class '%s' should never be used standalone" % self.__class__.__name__
+ print("Abstract class '%s' should never be used standalone" % (self.__class__.__name__))
 return []

 def generate(self):
@@ -218,12 +218,12 @@ class TestUrlGenerator(object):
 r = requests.get(t_url, headers=headers, stream=False, auth=('admin', 'admin'))

 if r.status_code != 200:
- print "Failed to get HTTP response from '%s', code %d" % (t_url, r.status_code)
+ print("Failed to get HTTP response from '%s', code %d" % (t_url, r.status_code))
 else:
 try:
 r_url = self.url_generator(json.loads(r.content))
 except:
- print "Failed to get json from '%s'. Please make sure you are connected to mininet." % r_url
+ print("Failed to get json from '%s'. Please make sure you are connected to mininet." % (r_url))

 return r_url
@@ -251,7 +251,7 @@ class TopoUrlGenerator(TestUrlGenerator):
 url_list.append(t_url)
 return url_list
 except KeyError:
- print 'Error parsing topology json'
+ print('Error parsing topology json')
 return []
@@ -278,7 +278,7 @@ class InvUrlGenerator(TestUrlGenerator):
 url_list.append(i_url)
 return url_list
 except KeyError:
- print 'Error parsing inventory json'
+ print('Error parsing inventory json')
 return []
@@ -311,7 +311,7 @@ if __name__ == "__main__":
 tg = TopoUrlGenerator(in_args.host, in_args.port, in_args.auth)
 topo_urls += tg.generate()
 if len(topo_urls) == 0:
- print 'Failed to generate topology URLs'
+ print('Failed to generate topology URLs')
 sys.exit(-1)

 # If required, get inventory resource URLs
@@ -319,32 +319,32 @@ if __name__ == "__main__":
 ig = InvUrlGenerator(in_args.host, in_args.port, in_args.auth)
 inv_urls += ig.generate()
 if len(inv_urls) == 0:
- print 'Failed to generate inventory URLs'
+ print('Failed to generate inventory URLs')
 sys.exit(-1)

 if in_args.resource == 'topo+inv' or in_args.resource == 'all':
 # To have balanced test results, the number of URLs for topology and inventory must be the same
 if len(topo_urls) != len(inv_urls):
- print "The number of topology and inventory URLs don't match"
+ print("The number of topology and inventory URLs don't match")
 sys.exit(-1)

 st = ShardPerformanceTester(in_args.host, in_args.port, in_args.auth, in_args.threads, in_args.requests,
 in_args.plevel)

 if in_args.resource == 'all' or in_args.resource == 'topo':
- print '==================================='
- print 'Testing topology shard performance:'
- print '==================================='
+ print('===================================')
+ print('Testing topology shard performance:')
+ print('===================================')
 st.run_test(topo_urls)

 if in_args.resource == 'all' or 
in_args.resource == 'inv': - print '====================================' - print 'Testing inventory shard performance:' - print '====================================' + print('====================================') + print('Testing inventory shard performance:') + print('====================================') st.run_test(inv_urls) if in_args.resource == 'topo+inv' or in_args.resource == 'all': - print '===============================================' - print 'Testing combined shards (topo+inv) performance:' - print '===============================================' + print('===============================================') + print('Testing combined shards (topo+inv) performance:') + print('===============================================') st.run_test(topo_urls + inv_urls) diff --git a/tools/odl-mdsal-clustering-tests/replace_cars.py b/tools/odl-mdsal-clustering-tests/replace_cars.py index 95b008dc54..5f4973ba79 100644 --- a/tools/odl-mdsal-clustering-tests/replace_cars.py +++ b/tools/odl-mdsal-clustering-tests/replace_cars.py @@ -69,8 +69,8 @@ $ENTRIES data = patch_data_template.substitute(mapping) response = session.put(url=url, auth=auth, headers=headers, data=data) if response.status_code not in [200, 201, 204]: - print "status: {}".format(response.status_code) - print "text: {}".format(response.text) + print("status: {}".format(response.status_code)) + print("text: {}".format(response.text)) sys.exit(1) diff --git a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py index b3731bc731..24a0fdc77e 100644 --- a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py +++ b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py @@ -113,16 +113,16 @@ class OvsdbConfigBlaster (object): 'node-id': 'ovsdb://%s:%s' % (vswitch_ip, vswitch_ovsdb_port), - 'post-url': urlprefix + - OvsdbConfigBlaster.return_ovsdb_url( + 'post-url': urlprefix + + OvsdbConfigBlaster.return_ovsdb_url( vswitch_ip, vswitch_ovsdb_port), - 'get-config-url': urlprefix + - OvsdbConfigBlaster.return_ovsdb_url( + 'get-config-url': urlprefix + + OvsdbConfigBlaster.return_ovsdb_url( vswitch_ip, vswitch_ovsdb_port), - 'get-oper-url': urlprefix + - OvsdbConfigBlaster.return_ovsdb_url( + 'get-oper-url': urlprefix + + OvsdbConfigBlaster.return_ovsdb_url( vswitch_ip, vswitch_ovsdb_port)}}) @@ -188,9 +188,9 @@ class OvsdbConfigBlaster (object): } self.send_rest(self.session, self.vswitch_dict[vswitch_name] - .get('post-url') + - '%2Fbridge%2F' + - bridge_name, + .get('post-url') + + '%2Fbridge%2F' + + bridge_name, add_bridge_body) self.session.close() @@ -262,9 +262,9 @@ class OvsdbConfigBlaster (object): bridge_name = unicode('br-' + str(br_num) + '-test') self.send_rest_del(self.session, self.vswitch_dict[vswitch_names] - .get('post-url') + - '%2Fbridge%2F' + - bridge_name) + .get('post-url') + + '%2Fbridge%2F' + + bridge_name) self.session.close() def delete_port(self, num_ports): @@ -387,5 +387,5 @@ if __name__ == "__main__": else: ovsdb_config_blaster.add_port() else: - print "please use: python ovsdbconfigblaster.py --help " \ - "\nUnsupported mode: ", args.mode + print("please use: python ovsdbconfigblaster.py --help " + "\nUnsupported mode: ", args.mode) diff --git a/tools/pcep_updater/updater.py b/tools/pcep_updater/updater.py index e5a0c42744..159398fae0 100644 --- a/tools/pcep_updater/updater.py +++ b/tools/pcep_updater/updater.py @@ -121,7 +121,6 @@ def iterable_msg(pccs, lsps, workers, hop): list_data[1] = pcc_ip list_data[4] = pcc_ip whole_data = 
''.join(list_data)
- # print 'DEBUG:', whole_data + '\n'
 worker = (lsp * pccs + pcc) % workers
 post_kwargs = {"data": whole_data, "headers": headers}
 yield worker, post_kwargs
@@ -147,15 +146,11 @@ def queued_send(session, queue_messages, queue_responses):

 def classify(resp_tuple):
 """Return 'pass' or a reason what is wrong with response."""
- # print 'DEBUG: received', response
 prepend = ''
 status = resp_tuple[0]
- # print 'DEBUG: verifying status', status
 if (status != 200) and (status != 204): # is it int?
- # print 'DEBUG:', response.content
 prepend = 'status: ' + str(status) + ' '
 content = resp_tuple[1]
- # print 'DEBUG: verifying content', content
 if prepend or (content != expected and content != ''):
 return prepend + 'content: ' + str(content)
 return 'pass'
@@ -164,7 +159,6 @@ def classify(resp_tuple):
 # Main.
 list_q_msg = [collections.deque() for _ in range(args.workers)]
 for worker, post_kwargs in iterable_msg(args.pccs, args.lsps, args.workers, args.hop):
- # print 'DEBUG: worker', repr(worker), 'message', repr(message)
 list_q_msg[worker].append(post_kwargs)
 queue_responses = collections.deque() # thread safe
 threads = []
@@ -177,7 +171,7 @@ for worker in range(args.workers):
 threads.append(thread)
 tasks = sum(map(len, list_q_msg)) # fancy way of counting, should equal to pccs*lsps.
 counter = CounterDown(tasks)
-print 'work is going to start with', tasks, 'tasks'
+print('work is going to start with %s tasks' % tasks)
 time_start = time.time()
 for thread in threads:
 thread.start()
@@ -206,12 +200,10 @@ while 1:
 continue
 left = len(queue_responses)
 if left:
- print 'error: more responses left inqueue', left
+ print('error: more responses left in queue', left)
 else:
- print 'Time is up!'
+ print('Time is up!')
 left = len(queue_responses) # can be still increasing
- # if left:
- # print 'WARNING: left', left
 for _ in range(left):
 resp_tuple = queue_responses.popleft() # thread safe
 result = classify(resp_tuple)
@@ -219,7 +211,7 @@ while 1:
 break # may leave late items in queue_reponses
 time_stop = time.time()
 timedelta_duration = time_stop - time_start
-print 'took', timedelta_duration
-print repr(counter.counter)
+print('took', timedelta_duration)
+print(repr(counter.counter))
 # for message in debug_list:
 # print message
diff --git a/tools/wcbench/stats.py b/tools/wcbench/stats.py
index 2065064e82..3058678375 100755
--- a/tools/wcbench/stats.py
+++ b/tools/wcbench/stats.py
@@ -355,7 +355,7 @@ if args.graphs or args.all_graphs:
 pyplot.subplots_adjust(hspace=.7)
 else:
 pyplot.subplots_adjust(hspace=.7)
- print "WARNING: That's a lot of graphs. Add a second column?"
+ print("WARNING: That's a lot of graphs. Add a second column?")
 pyplot.show()

 # Print stats
-- 
2.36.6
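
Note on the conversion pattern: in Python 2, a trailing comma after a print statement (print foo,) suppressed the newline, but inside a Python 3 call the trailing comma is inert, so a spelling like print(foo,) still ends the line. Where the old single-line output matters, end='' / end=' ' is the behavior-preserving form used in the hunks above. A minimal sketch of the mapping; the stats dict here is a hypothetical stand-in for values such as rqst_stats or flow_stats:

    stats = {'flows': 42}  # hypothetical example value

    # Python 2:  print '    Stats: ',  then  print stats   -> one line of output
    # Python 3 equivalents:
    print('    Stats: ', end='')  # keep the cursor on the same line
    print(stats, end=' ')         # space-separate items, still no newline
    print(stats)                  # last item ends the line

Scripts that must keep running under Python 2 during the migration can instead add from __future__ import print_function at the top of each file, which makes the print() calls above valid in both interpreters.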