From 2054a254a8608303b4f69aa5181285686cab5c6e Mon Sep 17 00:00:00 2001
From: Sangwook Ha
Date: Fri, 25 Mar 2022 00:16:17 -0700
Subject: [PATCH 1/1] Bump pre-commit black to 22.1.0

Update black from 20.8b1 to 22.1.0:

https://github.com/psf/black/releases

and apply new format changes enforced by the new version.

Change-Id: I1281481b2d4561d72f5edcfeb3665a774108468d
Signed-off-by: Sangwook Ha
---
 .pre-commit-config.yaml                     |  2 +-
 csit/libraries/BGPCEP/ipaddr.py             |  8 +--
 csit/libraries/Common.py                    |  2 +-
 csit/libraries/ConfGen.py                   |  2 +-
 csit/libraries/Topology.py                  |  8 +--
 csit/libraries/Topologynew.py               |  8 +--
 csit/variables/Variables.py                 |  2 +-
 tools/clustering/cluster-deployer/deploy.py |  2 +-
 tools/mdsal_benchmark/dsbenchmark.py        | 12 ++--
 tools/mdsal_benchmark/ntfbenchmark.py       | 16 ++---
 tools/mdsal_benchmark/rpcbenchmark.py       | 10 ++--
 .../create_lisp_control_plane_pcap.py       |  4 +-
 .../mapping_blaster.py                      | 42 +++++++-------
 .../flow_config_blaster.py                  | 48 +++++++--------
 .../onos_tester.py                          |  4 +-
 .../ovsdbconfigblaster.py                   | 58 +++++++++----------
 16 files changed, 114 insertions(+), 114 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 83ee03ff36..e940f86350 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
       - id: flake8
 
   - repo: https://github.com/psf/black
-    rev: 20.8b1
+    rev: 22.1.0
     hooks:
      - id: black
 
diff --git a/csit/libraries/BGPCEP/ipaddr.py b/csit/libraries/BGPCEP/ipaddr.py
index 485e26388b..c0fb69448f 100644
--- a/csit/libraries/BGPCEP/ipaddr.py
+++ b/csit/libraries/BGPCEP/ipaddr.py
@@ -146,7 +146,7 @@ def v6_int_to_packed(address):
     Returns:
         The binary representation of this address.
     """
-    return Bytes(struct.pack("!QQ", address >> 64, address & (2 ** 64 - 1)))
+    return Bytes(struct.pack("!QQ", address >> 64, address & (2**64 - 1)))
 
 
 def _find_address_range(addresses):
@@ -255,7 +255,7 @@ def summarize_address_range(first, last):
     nbits = _count_righthand_zero_bits(first_int, ip_bits)
     current = None
     while nbits >= 0:
-        addend = 2 ** nbits - 1
+        addend = 2**nbits - 1
         current = first_int + addend
         nbits -= 1
         if current <= last_int:
@@ -1094,7 +1094,7 @@ class _BaseV4(object):
     """
 
     # Equivalent to 255.255.255.255 or 32 bits of 1's.
-    _ALL_ONES = (2 ** IPV4LENGTH) - 1
+    _ALL_ONES = (2**IPV4LENGTH) - 1
     _DECIMAL_DIGITS = frozenset("0123456789")
 
     def __init__(self, address):
@@ -1410,7 +1410,7 @@ class _BaseV6(object):
 
     """
 
-    _ALL_ONES = (2 ** IPV6LENGTH) - 1
+    _ALL_ONES = (2**IPV6LENGTH) - 1
     _HEXTET_COUNT = 8
     _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
 
diff --git a/csit/libraries/Common.py b/csit/libraries/Common.py
index 448815406e..d8bc1255cf 100644
--- a/csit/libraries/Common.py
+++ b/csit/libraries/Common.py
@@ -62,7 +62,7 @@ def num_of_nodes(depth, fanout):
     """
     result = 0
     for i in range(depth):
-        result += fanout ** i
+        result += fanout**i
     return result
 
 
diff --git a/csit/libraries/ConfGen.py b/csit/libraries/ConfGen.py
index a181314f02..a15a30e9a1 100644
--- a/csit/libraries/ConfGen.py
+++ b/csit/libraries/ConfGen.py
@@ -39,7 +39,7 @@ def generate_akka(original_file, node_idx=1, nodes_ip_list=["127.0.0.1"]):
         "hostname"
     ] = nodes_ip_list[node_idx - 1]
     seed_nodes = [
-        u"akka.tcp://opendaylight-cluster-data@{}:2550".format(ip)
+        "akka.tcp://opendaylight-cluster-data@{}:2550".format(ip)
         for ip in nodes_ip_list
     ]
     conf["odl-cluster-data"]["akka"]["cluster"]["seed-nodes"] = seed_nodes
diff --git a/csit/libraries/Topology.py b/csit/libraries/Topology.py
index ce26581dec..b82ff1d1df 100644
--- a/csit/libraries/Topology.py
+++ b/csit/libraries/Topology.py
@@ -13,11 +13,11 @@ class Topology(object):
 
     topo_nodes_db = [
         [],
-        [{u"type": u"OF", u"id": u"00:00:00:00:00:00:00:01"}],
+        [{"type": "OF", "id": "00:00:00:00:00:00:00:01"}],
         [
-            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:01"},
-            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:02"},
-            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:03"},
+            {"type": "OF", "id": "00:00:00:00:00:00:00:01"},
+            {"type": "OF", "id": "00:00:00:00:00:00:00:02"},
+            {"type": "OF", "id": "00:00:00:00:00:00:00:03"},
         ],
     ]
 
diff --git a/csit/libraries/Topologynew.py b/csit/libraries/Topologynew.py
index f8c984112d..d882e64e79 100644
--- a/csit/libraries/Topologynew.py
+++ b/csit/libraries/Topologynew.py
@@ -17,11 +17,11 @@ class Topologynew(object):
 
     topo_nodes_db = [
         [],
-        [{u"type": u"MD_SAL", u"id": u"openflow:1"}],
+        [{"type": "MD_SAL", "id": "openflow:1"}],
         [
-            {u"type": u"MD_SAL", u"id": u"openflow:1"},
-            {u"type": u"MD_SAL", u"id": u"openflow:2"},
-            {u"type": u"MD_SAL", u"id": u"openflow:3"},
+            {"type": "MD_SAL", "id": "openflow:1"},
+            {"type": "MD_SAL", "id": "openflow:2"},
+            {"type": "MD_SAL", "id": "openflow:3"},
         ],
     ]
 
diff --git a/csit/variables/Variables.py b/csit/variables/Variables.py
index 9f519658f0..b4043b73c9 100644
--- a/csit/variables/Variables.py
+++ b/csit/variables/Variables.py
@@ -189,7 +189,7 @@ USER = (  # TODO: who is using this? Can we make it more specific? (e.g. RESTC
 )
 PWD = "admin"
 PASSWORD = "EMPTY"
-AUTH = [u"admin", u"admin"]
+AUTH = ["admin", "admin"]
 SCOPE = "sdn"
 HEADERS = {"Content-Type": "application/json"}
 # FIXME: keep it as 'application/json' to make it work for both Bierman02
diff --git a/tools/clustering/cluster-deployer/deploy.py b/tools/clustering/cluster-deployer/deploy.py
index ed14b00179..11a56e0099 100755
--- a/tools/clustering/cluster-deployer/deploy.py
+++ b/tools/clustering/cluster-deployer/deploy.py
@@ -112,7 +112,7 @@ class TemplateRenderer:
         with open(self.template_root + template_path, "r") as myfile:
             data = myfile.read()
 
-        parsed = pystache.parse(u"%(data)s" % locals())
+        parsed = pystache.parse("%(data)s" % locals())
         renderer = pystache.Renderer()
         output = renderer.render(parsed, variables)
 
diff --git a/tools/mdsal_benchmark/dsbenchmark.py b/tools/mdsal_benchmark/dsbenchmark.py
index 32888ddbfd..4dbc004d34 100755
--- a/tools/mdsal_benchmark/dsbenchmark.py
+++ b/tools/mdsal_benchmark/dsbenchmark.py
@@ -174,7 +174,7 @@ def send_test_request(
     r = requests.post(
         url, data, headers=postheaders, stream=False, auth=("admin", "admin")
     )
-    result = {u"http-status": r.status_code}
+    result = {"http-status": r.status_code}
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
@@ -197,11 +197,11 @@ def print_results(run_type, idx, res):
         % (
             run_type,
             idx,
-            res[u"status"],
-            res[u"listBuildTime"],
-            res[u"execTime"],
-            res[u"txOk"],
-            res[u"txError"],
+            res["status"],
+            res["listBuildTime"],
+            res["execTime"],
+            res["txOk"],
+            res["txError"],
         )
     )
 
diff --git a/tools/mdsal_benchmark/ntfbenchmark.py b/tools/mdsal_benchmark/ntfbenchmark.py
index c386aa6fa7..6c5f730c46 100755
--- a/tools/mdsal_benchmark/ntfbenchmark.py
+++ b/tools/mdsal_benchmark/ntfbenchmark.py
@@ -57,7 +57,7 @@ def send_test_request(producer_type, producers, listeners, payload_size, iterati
     r = requests.post(
         url, data, headers=postheaders, stream=False, auth=("admin", "admin")
     )
-    result = {u"http-status": r.status_code}
+    result = {"http-status": r.status_code}
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
@@ -80,13 +80,13 @@ def print_results(run_type, idx, res):
         % (
             run_type,
             idx,
-            res[u"producer-ok"],
-            res[u"producer-error"],
-            res[u"listener-ok"],
-            res[u"producer-rate"],
-            res[u"listener-rate"],
-            res[u"producer-elapsed-time"],
-            res[u"listener-elapsed-time"],
+            res["producer-ok"],
+            res["producer-error"],
+            res["listener-ok"],
+            res["producer-rate"],
+            res["listener-rate"],
+            res["producer-elapsed-time"],
+            res["listener-elapsed-time"],
         )
     )
 
diff --git a/tools/mdsal_benchmark/rpcbenchmark.py b/tools/mdsal_benchmark/rpcbenchmark.py
index 563ac95caf..1310e8f855 100755
--- a/tools/mdsal_benchmark/rpcbenchmark.py
+++ b/tools/mdsal_benchmark/rpcbenchmark.py
@@ -57,7 +57,7 @@ def send_test_request(operation, clients, servers, payload_size, iterations):
     r = requests.post(
         url, data, headers=postheaders, stream=False, auth=("admin", "admin")
     )
-    result = {u"http-status": r.status_code}
+    result = {"http-status": r.status_code}
     if r.status_code == 200:
         result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
@@ -80,10 +80,10 @@ def print_results(run_type, idx, res):
         % (
             run_type,
             idx,
-            res[u"global-rtc-client-ok"],
-            res[u"global-rtc-client-error"],
-            res[u"rate"],
-            res[u"exec-time"],
+            res["global-rtc-client-ok"],
+            res["global-rtc-client-error"],
+            res["rate"],
+            res["exec-time"],
         )
     )
 
diff --git a/tools/odl-lispflowmapping-performance-tests/create_lisp_control_plane_pcap.py b/tools/odl-lispflowmapping-performance-tests/create_lisp_control_plane_pcap.py
index 00ed0e70f5..30a01b0b67 100755
--- a/tools/odl-lispflowmapping-performance-tests/create_lisp_control_plane_pcap.py
+++ b/tools/odl-lispflowmapping-performance-tests/create_lisp_control_plane_pcap.py
@@ -58,7 +58,7 @@ def generate_map_request(eid):
     """
     sport1 = random.randint(60000, 65000)
     sport2 = random.randint(60000, 65000)
-    rnonce = random.randint(0, 2 ** 63)
+    rnonce = random.randint(0, 2**63)
     itr_rloc = [lisp.LISP_AFI_Address(address=src_rloc, afi=1)]
 
     record = [
@@ -92,7 +92,7 @@ def generate_map_register(eid, rloc, key_id):
     :return : returns a Scapy Map-Request packet object
     """
     sport1 = random.randint(60000, 65000)
-    rnonce = random.randint(0, 2 ** 63)
+    rnonce = random.randint(0, 2**63)
 
     rlocs = [
         lisp.LISP_Locator_Record(
diff --git a/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py b/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py
index 2565e19831..203f10f165 100755
--- a/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py
+++ b/tools/odl-lispflowmapping-performance-tests/mapping_blaster.py
@@ -33,28 +33,28 @@ class MappingRPCBlaster(object):
 
     # Template for adding mappings
     add_mapping_template = {
-        u"input": {
-            u"mapping-record": {
-                u"recordTtl": 60,
-                u"action": u"NoAction",
-                u"authoritative": True,
-                u"eid": {
-                    u"address-type": u"ietf-lisp-address-types:ipv4-prefix-afi",
-                    u"ipv4-prefix": u"10.0.0.0/32",
+        "input": {
+            "mapping-record": {
+                "recordTtl": 60,
+                "action": "NoAction",
+                "authoritative": True,
+                "eid": {
+                    "address-type": "ietf-lisp-address-types:ipv4-prefix-afi",
+                    "ipv4-prefix": "10.0.0.0/32",
                 },
-                u"LocatorRecord": [
+                "LocatorRecord": [
                     {
-                        u"locator-id": u"ipv4:172.16.0.0",
-                        u"priority": 1,
-                        u"weight": 1,
-                        u"multicastPriority": 255,
-                        u"multicastWeight": 0,
-                        u"localLocator": True,
-                        u"rlocProbed": False,
-                        u"routed": True,
-                        u"rloc": {
-                            u"address-type": u"ietf-lisp-address-types:ipv4-afi",
-                            u"ipv4": u"172.16.0.0",
+                        "locator-id": "ipv4:172.16.0.0",
+                        "priority": 1,
+                        "weight": 1,
+                        "multicastPriority": 255,
+                        "multicastWeight": 0,
+                        "localLocator": True,
+                        "rlocProbed": False,
+                        "routed": True,
+                        "rloc": {
+                            "address-type": "ietf-lisp-address-types:ipv4-afi",
+                            "ipv4": "172.16.0.0",
                         },
                     }
                 ],
@@ -63,7 +63,7 @@ class MappingRPCBlaster(object):
     }
 
     # Template for getting mappings
-    get_mapping_template = {u"input": {u"eid": {u"ipv4-prefix": u"10.0.0.0"}}}
+    get_mapping_template = {"input": {"eid": {"ipv4-prefix": "10.0.0.0"}}}
 
     def __init__(self, host, port, start_eid, mask, start_rloc, nmappings, v):
         """
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
index 62844f7ee6..09a25dabd9 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
@@ -64,28 +64,28 @@ class FlowConfigBlaster(object):
 
     # The "built-in" flow template
     flow_mode_template = {
-        u"flow": [
+        "flow": [
             {
-                u"hard-timeout": 65000,
-                u"idle-timeout": 65000,
-                u"cookie_mask": 4294967295,
-                u"flow-name": u"FLOW-NAME-TEMPLATE",
-                u"priority": 2,
-                u"strict": False,
-                u"cookie": 0,
-                u"table_id": 0,
-                u"installHw": False,
-                u"id": u"FLOW-ID-TEMPLATE",
-                u"match": {
-                    u"ipv4-destination": u"0.0.0.0/32",
-                    u"ethernet-match": {u"ethernet-type": {u"type": 2048}},
+                "hard-timeout": 65000,
+                "idle-timeout": 65000,
+                "cookie_mask": 4294967295,
+                "flow-name": "FLOW-NAME-TEMPLATE",
+                "priority": 2,
+                "strict": False,
+                "cookie": 0,
+                "table_id": 0,
+                "installHw": False,
+                "id": "FLOW-ID-TEMPLATE",
+                "match": {
+                    "ipv4-destination": "0.0.0.0/32",
+                    "ethernet-match": {"ethernet-type": {"type": 2048}},
                 },
-                u"instructions": {
-                    u"instruction": [
+                "instructions": {
+                    "instruction": [
                         {
-                            u"order": 0,
-                            u"apply-actions": {
-                                u"action": [{u"drop-action": {}, u"order": 0}]
+                            "order": 0,
+                            "apply-actions": {
+                                "action": [{"drop-action": {}, "order": 0}]
                             },
                         }
                     ]
@@ -583,12 +583,12 @@ def get_json_from_file(filename):
             ft = json.load(f)
             keys = ft["flow"][0].keys()
             if (
-                (u"cookie" in keys)
-                and (u"flow-name" in keys)
-                and (u"id" in keys)
-                and (u"match" in keys)
+                ("cookie" in keys)
+                and ("flow-name" in keys)
+                and ("id" in keys)
+                and ("match" in keys)
             ):
-                if u"ipv4-destination" in ft[u"flow"][0]["match"].keys():
+                if "ipv4-destination" in ft["flow"][0]["match"].keys():
                     print('File "%s" ok to use as flow template' % filename)
                     return ft
     except ValueError:
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py
index f305186051..380a29bb1f 100644
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py
@@ -433,7 +433,7 @@ def main(*argv):
             print(flow_stats)
             try:
                 pending_adds = int(
-                    flow_stats[u"PENDING_ADD"]
+                    flow_stats["PENDING_ADD"]
                 )  # noqa # FIXME: Print this somewhere.
             except KeyError:
                 break
@@ -524,7 +524,7 @@ def main(*argv):
             print(flow_stats)
             try:
                 pending_rems = int(
-                    flow_stats[u"PENDING_REMOVE"]
+                    flow_stats["PENDING_REMOVE"]
                )  # noqa # FIXME: Print this somewhere.
             except KeyError:
                 break
diff --git a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
index ba5558e4d9..92fba500ae 100644
--- a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
+++ b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
@@ -137,12 +137,12 @@ class OvsdbConfigBlaster(object):
             an instance of Open vSwitch
         """
         connect_ovs_body = {
-            u"network-topology:node": [
+            "network-topology:node": [
                 {
-                    u"node-id": unicode(vswitch_dict["node-id"]),
-                    u"connection-info": {
-                        u"ovsdb:remote-port": unicode(vswitch_dict["ovsdb-port"]),
-                        u"ovsdb:remote-ip": unicode(vswitch_dict["ip"]),
+                    "node-id": unicode(vswitch_dict["node-id"]),
+                    "connection-info": {
+                        "ovsdb:remote-port": unicode(vswitch_dict["ovsdb-port"]),
+                        "ovsdb:remote-ip": unicode(vswitch_dict["ip"]),
                     },
                 }
             ]
@@ -160,30 +160,30 @@ class OvsdbConfigBlaster(object):
         for i in range(num_instances):
             bridge_name = unicode("br-" + str(i) + "-test")
             add_bridge_body = {
-                u"network-topology:node": [
+                "network-topology:node": [
                     {
-                        u"node-id": u"%s/bridge/%s"
+                        "node-id": "%s/bridge/%s"
                         % (
                             unicode(self.vswitch_dict[vswitch_name].get("node-id")),
                             unicode(bridge_name),
                         ),
-                        u"ovsdb:bridge-name": unicode(bridge_name),
-                        u"ovsdb:datapath-id": u"00:00:b2:bf:48:25:f2:4b",
-                        u"ovsdb:protocol-entry": [
-                            {u"protocol": u"ovsdb:ovsdb-bridge-protocol-openflow-13"}
+                        "ovsdb:bridge-name": unicode(bridge_name),
+                        "ovsdb:datapath-id": "00:00:b2:bf:48:25:f2:4b",
+                        "ovsdb:protocol-entry": [
+                            {"protocol": "ovsdb:ovsdb-bridge-protocol-openflow-13"}
                         ],
-                        u"ovsdb:controller-entry": [
+                        "ovsdb:controller-entry": [
                             {
-                                u"target": u"tcp:%s:%s"
+                                "target": "tcp:%s:%s"
                                 % (self.controller_ip, self.controller_port)
                             }
                         ],
-                        u"ovsdb:managed-by": u"/network-topology:network-topology/"
-                        u"network-topology:topology"
-                        u"[network-topology:topology-id"
-                        u"='ovsdb:1']/network-topology:node"
-                        u"[network-topology:node-id="
-                        u"'%s']"
+                        "ovsdb:managed-by": "/network-topology:network-topology/"
+                        "network-topology:topology"
+                        "[network-topology:topology-id"
+                        "='ovsdb:1']/network-topology:node"
+                        "[network-topology:node-id="
+                        "'%s']"
                         % unicode(self.vswitch_dict[vswitch_name].get("node-id")),
                     }
                 ]
@@ -230,22 +230,22 @@ class OvsdbConfigBlaster(object):
                 port_name = port_prefix + str(instance) + "-test-" + vswitch.get("ip")
                 body = {
                     "tp-body": {
-                        u"network-topology:termination-point": [
+                        "network-topology:termination-point": [
                             {
-                                u"ovsdb:options": [
+                                "ovsdb:options": [
                                     {
-                                        u"ovsdb:option": u"remote_ip",
-                                        u"ovsdb:value": unicode(
+                                        "ovsdb:option": "remote_ip",
+                                        "ovsdb:value": unicode(
                                             vswitch.get("remote-ip")
                                         ),
                                     }
                                 ],
-                                u"ovsdb:name": unicode(port_name),
-                                u"ovsdb:interface-type": unicode(port_type),
-                                u"tp-id": unicode(port_name),
-                                u"vlan-tag": unicode(instance + 1),
-                                u"trunks": [{u"trunk": u"5"}],
-                                u"vlan-mode": u"access",
+                                "ovsdb:name": unicode(port_name),
+                                "ovsdb:interface-type": unicode(port_type),
+                                "tp-id": unicode(port_name),
+                                "vlan-tag": unicode(instance + 1),
+                                "trunks": [{"trunk": "5"}],
+                                "vlan-mode": "access",
                             }
                         ]
                     },
-- 
2.36.6
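Reviewer note (illustrative sketch, not part of the patch): the hunks above are mechanical and fall into two patterns. The snippet below shows both on made-up one-liners; the variable names and values are hypothetical.

    # Formatting accepted by the old pin, black 20.8b1 (left side of the hunks)
    mask = 2 ** 8 - 1     # spaces around ** even for simple operands
    node = u"openflow:1"  # redundant u"" string prefix

    # Formatting enforced after the bump to 22.1.0 (right side of the hunks)
    mask = 2**8 - 1       # ** hugs simple operands such as literals and names
    node = "openflow:1"   # u prefix dropped; the runtime value is unchanged

Both pairs evaluate to identical values, so the patch is formatting-only and carries no behavior change.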