From: Jozef Behran
Date: Thu, 3 Mar 2016 14:30:27 +0000 (+0100)
Subject: Make pep8 more picky
X-Git-Tag: release/beryllium-sr1~42
X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=integration%2Ftest.git;a=commitdiff_plain;h=9a1a86700fb341c0f5f0700c5024da7975fd60ec

Make pep8 more picky

Several guidelines were disabled and as a result I was slipping into
my old coding style, which is incompatible with the Python sources
here. Enabling them uncovered that I was not the only one slipping.
In fact, all checks are now enabled except E133, which is incompatible
with E123.

Change-Id: I754c740f49a213360df5b187897e776266e6653f
Signed-off-by: Jozef Behran
---

diff --git a/csit/libraries/ClusterStateLibrary.py b/csit/libraries/ClusterStateLibrary.py
index dd006cb56a..ab5a643bb2 100644
--- a/csit/libraries/ClusterStateLibrary.py
+++ b/csit/libraries/ClusterStateLibrary.py
@@ -60,7 +60,7 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
     return dict


-def isRole(role, shardName, ipAddress,  numOfShards=3, numOfRetries=1, sleepFor=3, port=8181):
+def isRole(role, shardName, ipAddress, numOfShards=3, numOfRetries=1, sleepFor=3, port=8181):
     """Given a role (Leader, Follower, Candidate, or IsolatedLeader),
     shardname (e.g. shard-inventory-config), controller IP address,
     and number of shards on the controller,this function determines if the controller,
diff --git a/csit/libraries/Common.py b/csit/libraries/Common.py
index 424320c5ba..7cd9a0e30e 100644
--- a/csit/libraries/Common.py
+++ b/csit/libraries/Common.py
@@ -77,7 +77,7 @@ def num_of_links_for_node(nodeid, leaflist, fanout):
     '''
     if nodeid in leaflist:
         return 1
-    return (fanout+1)
+    return (fanout + 1)

 if __name__ == '__main__':
     print(num_of_nodes(3, 4))
diff --git a/csit/libraries/CrudLibrary.py b/csit/libraries/CrudLibrary.py
index 6198ddd7a0..5fdf77db23 100644
--- a/csit/libraries/CrudLibrary.py
+++ b/csit/libraries/CrudLibrary.py
@@ -68,7 +68,7 @@ def addPerson(hostname, port, numberOfPersons, *expected):
         return resp

     genderToggle = "Male"
-    for x in range(1, numberOfPersons+1):
+    for x in range(1, numberOfPersons + 1):
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
@@ -113,7 +113,7 @@ def addCarPerson(hostname, port, numberOfCarPersons):

         return resp

-    for x in range(1, numberOfCarPersons+1):
+    for x in range(1, numberOfCarPersons + 1):
         strId = str(x)

         payload = SettingsLibrary.add_car_person_template.substitute(Id=strId, personId="user" + strId)
@@ -143,8 +143,8 @@ def buyCar(hostname, port, numberOfCarBuyers, start=0):
     """
     print "Buying " + str(numberOfCarBuyers) + " Cars"

-    for x in range(start, start+numberOfCarBuyers):
-        strId = str(x+1)
+    for x in range(start, start + numberOfCarBuyers):
+        strId = str(x + 1)

         payload = SettingsLibrary.buy_car_rpc_template.substitute(personId="user" + strId, carId=strId)
diff --git a/csit/libraries/ScaleClient.py b/csit/libraries/ScaleClient.py
index d7ef8ec828..fd9f39f1ee 100644
--- a/csit/libraries/ScaleClient.py
+++ b/csit/libraries/ScaleClient.py
@@ -141,7 +141,7 @@ def _randomize(spread, maxn):
     while True:
         if spread == 'gauss':
             ga = abs(random.gauss(0, 1))
-            rv = int(ga*float(maxn)/3)
+            rv = int(ga * float(maxn) / 3)
             if rv < maxn:
                 return rv
         elif spread == 'linear':
@@ -160,7 +160,7 @@ def generate_new_flow_details(flows=10, switches=1, swspread='gauss', tables=250
     It also returns a dictionary with statsistics."""
     swflows = [_randomize(swspread, switches) for f in range(int(flows))]
     # we have to increse the switch index because mininet start indexing switches from 1 (not 0)
-    fltables = [(s+1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)]
+    fltables = [(s + 1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)]
     notes = _get_notes(fltables)
     return fltables, notes
diff --git a/csit/libraries/Topology.py b/csit/libraries/Topology.py
index 11a5c71fec..dad09ce08b 100644
--- a/csit/libraries/Topology.py
+++ b/csit/libraries/Topology.py
@@ -16,7 +16,7 @@ class Topology(object):
         [{u'type': u'OF', u'id': u'00:00:00:00:00:00:00:01'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:02'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:03'}]
-        ]
+    ]

     def __init__(self):
         self.builtin = BuiltIn()
diff --git a/csit/libraries/Topologynew.py b/csit/libraries/Topologynew.py
index 1ab31f3593..b3c6020f41 100644
--- a/csit/libraries/Topologynew.py
+++ b/csit/libraries/Topologynew.py
@@ -20,7 +20,7 @@ class Topologynew(object):
         [{u'type': u'MD_SAL', u'id': u'openflow:1'},
          {u'type': u'MD_SAL', u'id': u'openflow:2'},
          {u'type': u'MD_SAL', u'id': u'openflow:3'}]
-        ]
+    ]

     def __init__(self):
         self.builtin = BuiltIn()
@@ -71,7 +71,7 @@ class Topologynew(object):

         num_nodes = Common.num_of_nodes(depth, fanout)
         nodelist = []
-        for i in xrange(1, num_nodes+1):
+        for i in xrange(1, num_nodes + 1):
             temp = {"id": "00:00:00:00:00:00:00:%s" % format(i, '02x'), "type": "OF"}
             nodelist.append(temp)
         if int(exceptroot):
@@ -87,7 +87,7 @@ class Topologynew(object):
         @return leafnodes: list of ids of leaf nodes
         '''
         leafnodes = []
-        self._enumerate_nodes(0, 1, 1, fanout, depth-1, leafnodes)
+        self._enumerate_nodes(0, 1, 1, fanout, depth - 1, leafnodes)
         return leafnodes

     def _enumerate_nodes(self, currentdepth, nodeid, currentbranch, fanout, depth, leafnodes):
@@ -95,8 +95,8 @@ class Topologynew(object):
             leafnodes.append("00:00:00:00:00:00:00:%s" % format(nodeid, '02x'))
             return 1
         nodes = 1
-        for i in xrange(1, fanout+1):
-            nodes += self._enumerate_nodes(currentdepth+1, nodeid+nodes, i, fanout, depth, leafnodes)
+        for i in xrange(1, fanout + 1):
+            nodes += self._enumerate_nodes(currentdepth + 1, nodeid + nodes, i, fanout, depth, leafnodes)
         return nodes

 if __name__ == '__main__':
diff --git a/csit/libraries/UtilLibrary.py b/csit/libraries/UtilLibrary.py
index 7ded295799..dcd3d662b4 100644
--- a/csit/libraries/UtilLibrary.py
+++ b/csit/libraries/UtilLibrary.py
@@ -69,11 +69,11 @@ def post(url, userId, password, data):
     if password is None:
         password = 'admin'

-    print("post request with url "+url)
-    print("post request with data "+data)
+    print("post request with url " + url)
+    print("post request with data " + data)
     headers = {}
     headers['Content-Type'] = 'application/json'
-    # headers['Accept']= 'application/xml'
+    # headers['Accept'] = 'application/xml'
     session = _cache.switch("CLUSTERING_POST")
     resp = session.post(url, data.encode('utf-8'), headers=headers, auth=(userId, password))
@@ -91,7 +91,7 @@ def delete(url, userId='admin', password='admin'):
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
         DeprecationWarning
     )
-    print("delete all resources belonging to url"+url)
+    print("delete all resources belonging to url" + url)
     session = _cache.switch("CLUSTERING_DELETE")
     resp = session.delete(url, auth=(userId, password))  # noqa
@@ -148,7 +148,7 @@ def wait_for_controller_up(ip, port="8181"):
 def startAllControllers(username, password, karafhome, port, *ips):
     # Start all controllers
     for ip in ips:
-        execute_ssh_command(ip, username, password, karafhome+"/bin/start")
+        execute_ssh_command(ip, username, password, karafhome + "/bin/start")

     # Wait for all of them to be up
     for ip in ips:
@@ -170,7 +170,7 @@ def stopcontroller(ip, username, password, karafhome):


 def executeStopController(ip, username, password, karafhome):
-    execute_ssh_command(ip, username, password, karafhome+"/bin/stop")
+    execute_ssh_command(ip, username, password, karafhome + "/bin/stop")


 def stopAllControllers(username, password, karafhome, *ips):
@@ -224,7 +224,7 @@ def isolate_controller(controllers, username, password, isolated):
     :param isolated: Number (starting at one) of the controller to be isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
-    isolated_controller = controllers[isolated-1]
+    isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -I OUTPUT -p all --source '
@@ -256,7 +256,7 @@ def rejoin_controller(controllers, username, password, isolated):
     :param isolated: Number (starting at one) of the isolated controller isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
-    isolated_controller = controllers[isolated-1]
+    isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -D OUTPUT -p all --source '
diff --git a/csit/libraries/XmlComparator.py b/csit/libraries/XmlComparator.py
index 7bd8fff2fd..2e5a3ea483 100644
--- a/csit/libraries/XmlComparator.py
+++ b/csit/libraries/XmlComparator.py
@@ -284,7 +284,7 @@ class XmlComparator:
             if nodeDict['flow']['priority'] == origDict['flow']['priority']:
                 for p in IGNORED_PATHS_FOR_OC:
                     td = copy.copy(origDict)
-                    _rem_unimplemented_tags(p[0], p[1],  td)
+                    _rem_unimplemented_tags(p[0], p[1], td)
                 for (p, t, v) in TAGS_TO_ADD_FOR_OC:
                     _add_tags(p, t, v, td)
                 for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC:
diff --git a/csit/libraries/ipaddr.py b/csit/libraries/ipaddr.py
index 8f02607502..36a23c7ce4 100644
--- a/csit/libraries/ipaddr.py
+++ b/csit/libraries/ipaddr.py
@@ -1565,7 +1565,7 @@ class _BaseV6(object):
         hex_str = '%032x' % ip_int
         hextets = []
         for x in range(0, 32, 4):
-            hextets.append('%x' % int(hex_str[x:x+4], 16))
+            hextets.append('%x' % int(hex_str[x:x + 4], 16))

         hextets = self._compress_hextets(hextets)
         return ':'.join(hextets)
diff --git a/csit/suites/vpnservice/custom.py b/csit/suites/vpnservice/custom.py
index 64746be533..2d07a47e64 100755
--- a/csit/suites/vpnservice/custom.py
+++ b/csit/suites/vpnservice/custom.py
@@ -22,7 +22,7 @@ class Switch1(Topo):
         switch = self.addSwitch('s1')
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 1), mac="00:00:00:00:00:0"+str(h+1), ip="10.0.0."+str(h+1))
+            host = self.addHost('h%s' % (h + 1), mac="00:00:00:00:00:0" + str(h + 1), ip="10.0.0." + str(h + 1))
             self.addLink(host, switch)


@@ -33,7 +33,7 @@ class Switch2(Topo):
         switch = self.addSwitch('s2')
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 3), mac="00:00:00:00:00:0"+str(h+3), ip="10.0.0."+str(h+3))
+            host = self.addHost('h%s' % (h + 3), mac="00:00:00:00:00:0" + str(h + 3), ip="10.0.0." + str(h + 3))
             self.addLink(host, switch)

 topos = {'Switch1': (lambda: Switch1()),
diff --git a/tools/clustering/cluster-deployer/deploy.py b/tools/clustering/cluster-deployer/deploy.py
index a948a1bc0b..a885da5c74 100755
--- a/tools/clustering/cluster-deployer/deploy.py
+++ b/tools/clustering/cluster-deployer/deploy.py
@@ -140,7 +140,7 @@ class Deployer:
                               self.rootdir)

     def kill_controller(self):
-        self.remote.copy_file("kill_controller.sh",  self.rootdir + "/")
+        self.remote.copy_file("kill_controller.sh", self.rootdir + "/")
         self.remote.exec_cmd(self.rootdir + "/kill_controller.sh")

     def deploy(self):
@@ -266,10 +266,10 @@ def main():

     for x in range(0, 10):
         if len(all_replicas) > args.rf:
-            replicas["REPLICAS_" + str(x+1)] \
+            replicas["REPLICAS_" + str(x + 1)] \
                 = array_str(random.sample(all_replicas, args.rf))
         else:
-            replicas["REPLICAS_" + str(x+1)] = array_str(all_replicas)
+            replicas["REPLICAS_" + str(x + 1)] = array_str(all_replicas)

     deployers = []
diff --git a/tools/clustering/cluster-monitor/monitor.py b/tools/clustering/cluster-monitor/monitor.py
index 26a9d7a806..4df556bb27 100644
--- a/tools/clustering/cluster-monitor/monitor.py
+++ b/tools/clustering/cluster-monitor/monitor.py
@@ -131,11 +131,11 @@ for controller in controllers:

     # collect shards found in any controller; does not require all controllers to have the same shards
     for localShard in data['value']['LocalShards']:
-        shardName = localShard[(localShard.find("-shard-")+7):localShard.find("-config")]
+        shardName = localShard[(localShard.find("-shard-") + 7):localShard.find("-config")]
         Shards.add(shardName)
 print controller_names
 print Shards
-field_len = max(map(len, Shards))+2
+field_len = max(map(len, Shards)) + 2

 stdscr = curses.initscr()
 curses.noecho()
@@ -155,7 +155,7 @@ curses.init_pair(5, curses.COLOR_BLACK, curses.COLOR_YELLOW)
 for row, controller in enumerate(controller_names):
     stdscr.addstr(row + 1, 0, string.center(controller, field_len), curses.color_pair(1))
 for data_column, shard in enumerate(Shards):
-    stdscr.addstr(0, (field_len+1) * (data_column + 1), string.center(shard, field_len), curses.color_pair(1))
+    stdscr.addstr(0, (field_len + 1) * (data_column + 1), string.center(shard, field_len), curses.color_pair(1))
 stdscr.addstr(len(Shards) + 2, 0, 'Press q to quit.', curses.color_pair(1))
 stdscr.refresh()
@@ -170,12 +170,12 @@ while key != ord('q') and key != ord('Q'):
             cluster_stat = getClusterRolesWithCurl(shard_name, controllers, controller_names)
             for row, controller in enumerate(controllers):
                 status = size_and_color(cluster_stat, field_len, controller)
-                stdscr.addstr(row + 1, (field_len+1) * (data_column + 1), status['txt'], status['color'])
+                stdscr.addstr(row + 1, (field_len + 1) * (data_column + 1), status['txt'], status['color'])
         time.sleep(0.5)
     if odd_or_even % 2 == 0:
-        stdscr.addstr(0, field_len/2 - 2, " <3 ", curses.color_pair(5))
+        stdscr.addstr(0, field_len / 2 - 2, " <3 ", curses.color_pair(5))
     else:
-        stdscr.addstr(0, field_len/2 - 2, " <3 ", curses.color_pair(0))
+        stdscr.addstr(0, field_len / 2 - 2, " <3 ", curses.color_pair(0))
     stdscr.refresh()

 # clean up
diff --git a/tools/fastbgp/bgp_app_peer.py b/tools/fastbgp/bgp_app_peer.py
index a57c64b7ee..a068beee5b 100755
--- a/tools/fastbgp/bgp_app_peer.py
+++ b/tools/fastbgp/bgp_app_peer.py
@@ -185,7 +185,7 @@ def get_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
         if "prefix" in item:
             prefixes += item + ","
             prefix_count += 1
-    prefixes = prefixes[:len(prefixes)-1]
+    prefixes = prefixes[:len(prefixes) - 1]
     logger.debug("prefix_list=%s", prefixes)
     logger.info("prefix_count=%s", prefix_count)
diff --git a/tools/fastbgp/play.py b/tools/fastbgp/play.py
index 5551bc8df1..f63155fcf2 100755
--- a/tools/fastbgp/play.py
+++ b/tools/fastbgp/play.py
@@ -1360,13 +1360,13 @@ class ReadTracker(object):
                 logger.debug("withdrawn_prefix_received: %s", prefix)
             # total path attribute length
             total_pa_length_offset = 21 + wdr_length
-            total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset+2]
+            total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset + 2]
             total_pa_length = int(binascii.b2a_hex(total_pa_length_hex), 16)
             logger.debug("Total path attribute lenght: 0x%s (%s)",
                          binascii.b2a_hex(total_pa_length_hex), total_pa_length)
             # path attributes
             pa_offset = total_pa_length_offset + 2
-            pa_hex = msg[pa_offset:pa_offset+total_pa_length]
+            pa_hex = msg[pa_offset:pa_offset + total_pa_length]
             logger.debug("Path attributes: 0x%s", binascii.b2a_hex(pa_hex))
             self.decode_path_attributes(pa_hex)
             # network layer reachability information length
@@ -1374,7 +1374,7 @@ class ReadTracker(object):
             logger.debug("Calculated NLRI length: %s", nlri_length)
             # network layer reachability information
             nlri_offset = pa_offset + total_pa_length
-            nlri_hex = msg[nlri_offset:nlri_offset+nlri_length]
+            nlri_hex = msg[nlri_offset:nlri_offset + nlri_length]
             logger.debug("NLRI: 0x%s", binascii.b2a_hex(nlri_hex))
             nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
             logger.debug("NLRI prefix list: %s", nlri_prefix_list)
diff --git a/tools/netconf_tools/getter.py b/tools/netconf_tools/getter.py
index cac22dd203..7ef251b117 100644
--- a/tools/netconf_tools/getter.py
+++ b/tools/netconf_tools/getter.py
@@ -171,7 +171,7 @@ while request_count > 0:
     if len(responses) > 0:
         result = responses.popleft()
         if result[0] is None:
-            print "ERROR|" + result[1]+"|"
+            print "ERROR|" + result[1] + "|"
             break
         runtime = "%5.3f|%5.3f|%5.3f" % result[1]
         print "%03d|%s|%s|" % (result[0], runtime, result[2])
diff --git a/tools/odl-lispflowmapping-performance-tests/create_map_request_pcap.py b/tools/odl-lispflowmapping-performance-tests/create_map_request_pcap.py
index 3e099fbeb1..76d459092c 100755
--- a/tools/odl-lispflowmapping-performance-tests/create_map_request_pcap.py
+++ b/tools/odl-lispflowmapping-performance-tests/create_map_request_pcap.py
@@ -30,7 +30,7 @@ def generate_eids_random(base, n):
     eids = []
     for i in range(0, n):
         eids.append(str(netaddr.IPAddress(base) +
-                    random.randint(0, (n-1)*increment)))
+                    random.randint(0, (n - 1) * increment)))
     return eids


@@ -44,7 +44,7 @@ def generate_eids_sequential(base, n):
     """
     eids = []
     for i in range(0, n):
-        eids.append(str(netaddr.IPAddress(base) + i*increment))
+        eids.append(str(netaddr.IPAddress(base) + i * increment))
     return eids
diff --git a/tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py b/tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py
index 4f8124995e..0e1f66a7cd 100644
--- a/tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py
@@ -11,7 +11,7 @@ __email__ = "syedbahm@cisco.com"
 def addCar(numberOfCars):
     """Creates the specified number of cars based on
     Cars yang model using RESTCONF"""
-    for x in range(1, numberOfCars+1):
+    for x in range(1, numberOfCars + 1):
         strId = str(x)
         payload = settings.add_car_payload_template.substitute(
             id=strId, category="category" + strId, model="model" + strId,
@@ -50,7 +50,7 @@ def addPerson(numberOfPersons):
         return

     genderToggle = "Male"
-    for x in range(1, numberOfPersons+1):
+    for x in range(1, numberOfPersons + 1):
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
index 04a9fc3a80..5bad661eb1 100755
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
@@ -30,7 +30,7 @@ def monitor_stats(crawler, monitortime, period):
         yield (actualtime, crawler.nodes, crawler.reported_flows, crawler.found_flows)
         if actualtime > basetime + monitortime:
             break
-        time.sleep(period-get_time_delta(actualtime, lastcrawl))
+        time.sleep(period - get_time_delta(actualtime, lastcrawl))


 if __name__ == "__main__":
diff --git a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
index 1309fefab7..289624ddd5 100644
--- a/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
+++ b/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
@@ -84,12 +84,12 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
                 results[r.status_code] = 1

     total = sum(results.values())
-    rate = total/t.secs
+    rate = total / t.secs
     total_requests.increment(total)
     total_req_rate.increment(rate)

-    mbytes = total_len / (1024*1024)
-    mrate = mbytes/t.secs
+    mbytes = total_len / (1024 * 1024)
+    mrate = mbytes / t.secs
     total_mbytes.increment(mbytes)
     total_mb_rate.increment(mrate)
diff --git a/tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py b/tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py
index 7a6d060a96..27c703362b 100644
--- a/tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py
+++ b/tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py
@@ -262,10 +262,10 @@ def _task_executor(preparing_function, odl_ip="127.0.0.1", port="8181",
     hosts = odl_ip.split(',')
     nrhosts = len(hosts)

-    items = [i+1 for i in range(item_count)]
+    items = [i + 1 for i in range(item_count)]
     item_groups = []
     for i in range(0, item_count, items_per_request):
-        item_groups.append(items[i:i+items_per_request])
+        item_groups.append(items[i:i + items_per_request])

     # fill the queue with details needed for one http requests
     send_queue = Queue.Queue()
diff --git a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
index 7d46757ef5..3e5b7cce9f 100644
--- a/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
+++ b/tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
@@ -107,7 +107,7 @@ class OvsdbConfigBlaster (object):
                 vswitch_name: {
                     'name': vswitch_name,
                     'ip': vswitch_ip,
-                    'remote-ip':  vswitch_remote_ip,
+                    'remote-ip': vswitch_remote_ip,
                     'ovsdb-port': vswitch_ovsdb_port,
                     'node-id': 'ovsdb://%s:%s' % (vswitch_ip,
                                                   vswitch_ovsdb_port),
@@ -240,7 +240,7 @@ class OvsdbConfigBlaster (object):
                         ],
                         u"vlan-mode": u"access"
                     }
-            ]
+                ]
             },
             # TODO add port-body
             'port-body': {}}
diff --git a/tools/wcbench/stats.py b/tools/wcbench/stats.py
index deb8c78d58..2065064e82 100755
--- a/tools/wcbench/stats.py
+++ b/tools/wcbench/stats.py
@@ -332,7 +332,7 @@ elif args.graphs:
 else:
     graphs_to_build = []
 for graph, graph_num in zip(graphs_to_build, range(len(graphs_to_build))):
-    graph_map[graph](len(graphs_to_build), graph_num+1)
+    graph_map[graph](len(graphs_to_build), graph_num + 1)

 # Compute stats
 if args.all_stats:
diff --git a/tox.ini b/tox.ini
index dac05bf5e7..85f46013a6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,3 +12,13 @@ commands = flake8
 [flake8]
 show-source = True
 max-line-length = 120
+
+# - Select E121, E123, E126, E226, E241, E242 and E704 which are turned OFF
+#   by default but represent guidelines accepted by us.
+# - Do not select E133 because it is incompatible with E123 which was
+#   selected instead.
+# - It turns out that now all checks except E133 are enabled so the select
+#   can be simplified to be just "E,W". However a new version could change
+#   that (select E133 instead of E123) but that should be caught by the
+#   verify job.
+select = E,W
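
A note on the E123/E133 pair mentioned above: pycodestyle treats them as
mutually exclusive placements of a hanging closing bracket, so selecting
both would flag every multiline bracket one way or the other. A minimal
sketch (some_function is hypothetical, not from this repository):

    # E123 (selected here) wants the closing bracket at the indent of
    # the line that opens the construct:
    result = some_function(
        'arg_one',
        'arg_two',
    )
    # E133 would instead require the bracket to stay at the hanging
    # indent below, which the selected E123 rejects:
    result = some_function(
        'arg_one',
        'arg_two',
        )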
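
To try the stricter checks locally, note that flake8 picks up the
[flake8] section of tox.ini on its own, so a plain run from the
repository root (assuming flake8 is installed) applies the same select
list as the verify job:

    flake8 .

Running tox does the same via the "commands = flake8" entry shown
above, inside a managed virtualenv.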