Make pep8 more picky 90/35690/4
authorJozef Behran <jbehran@cisco.com>
Thu, 3 Mar 2016 14:30:27 +0000 (15:30 +0100)
committerGerrit Code Review <gerrit@opendaylight.org>
Sun, 6 Mar 2016 20:40:38 +0000 (20:40 +0000)
Several guidelines were disabled and, as a result, I was
slipping into my old coding style, which is incompatible with
the Python sources here. Enabling them turned out to uncover
that I was not the only one slipping. As a matter of fact, all
checks are enabled now except E133, which is not compatible
with E123.

Change-Id: I754c740f49a213360df5b187897e776266e6653f
Signed-off-by: Jozef Behran <jbehran@cisco.com>
23 files changed:
csit/libraries/ClusterStateLibrary.py
csit/libraries/Common.py
csit/libraries/CrudLibrary.py
csit/libraries/ScaleClient.py
csit/libraries/Topology.py
csit/libraries/Topologynew.py
csit/libraries/UtilLibrary.py
csit/libraries/XmlComparator.py
csit/libraries/ipaddr.py
csit/suites/vpnservice/custom.py
tools/clustering/cluster-deployer/deploy.py
tools/clustering/cluster-monitor/monitor.py
tools/fastbgp/bgp_app_peer.py
tools/fastbgp/play.py
tools/netconf_tools/getter.py
tools/odl-lispflowmapping-performance-tests/create_map_request_pcap.py
tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py
tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
tools/wcbench/stats.py
tox.ini

index dd006cb56a9b26d41ba6bdb852e55bec462003dd..ab5a643bb29d97a7ffb88f1ca612e5ef93cecd7d 100644 (file)
@@ -60,7 +60,7 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
     return dict
 
 
     return dict
 
 
-def isRole(role,  shardName, ipAddress, numOfShards=3, numOfRetries=1, sleepFor=3, port=8181):
+def isRole(role, shardName, ipAddress, numOfShards=3, numOfRetries=1, sleepFor=3, port=8181):
     """Given a role (Leader, Follower, Candidate, or IsolatedLeader),
     shardname (e.g. shard-inventory-config), controller IP address,
     and number of shards on the controller,this function determines if the controller,
     """Given a role (Leader, Follower, Candidate, or IsolatedLeader),
     shardname (e.g. shard-inventory-config), controller IP address,
     and number of shards on the controller,this function determines if the controller,
index 424320c5bad4ec0055941224e22189a9f70b2dae..7cd9a0e30e464fdea094e3045bf18fe523e05471 100644 (file)
@@ -77,7 +77,7 @@ def num_of_links_for_node(nodeid, leaflist, fanout):
     '''
     if nodeid in leaflist:
         return 1
     '''
     if nodeid in leaflist:
         return 1
-    return (fanout+1)
+    return (fanout + 1)
 
 if __name__ == '__main__':
     print(num_of_nodes(3, 4))
 
 if __name__ == '__main__':
     print(num_of_nodes(3, 4))
index 6198ddd7a08915d5bb34683ea1dd940ae95e65da..5fdf77db2336c7d7ebd1f4443098f3963765abbe 100644 (file)
@@ -68,7 +68,7 @@ def addPerson(hostname, port, numberOfPersons, *expected):
         return resp
 
     genderToggle = "Male"
         return resp
 
     genderToggle = "Male"
-    for x in range(1, numberOfPersons+1):
+    for x in range(1, numberOfPersons + 1):
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
@@ -113,7 +113,7 @@ def addCarPerson(hostname, port, numberOfCarPersons):
 
         return resp
 
 
         return resp
 
-    for x in range(1, numberOfCarPersons+1):
+    for x in range(1, numberOfCarPersons + 1):
         strId = str(x)
 
         payload = SettingsLibrary.add_car_person_template.substitute(Id=strId, personId="user" + strId)
         strId = str(x)
 
         payload = SettingsLibrary.add_car_person_template.substitute(Id=strId, personId="user" + strId)
@@ -143,8 +143,8 @@ def buyCar(hostname, port, numberOfCarBuyers, start=0):
     """
 
     print "Buying " + str(numberOfCarBuyers) + " Cars"
     """
 
     print "Buying " + str(numberOfCarBuyers) + " Cars"
-    for x in range(start, start+numberOfCarBuyers):
-        strId = str(x+1)
+    for x in range(start, start + numberOfCarBuyers):
+        strId = str(x + 1)
 
         payload = SettingsLibrary.buy_car_rpc_template.substitute(personId="user" + strId, carId=strId)
 
 
         payload = SettingsLibrary.buy_car_rpc_template.substitute(personId="user" + strId, carId=strId)
 
index d7ef8ec82893e75e365eb43c452ecabd4f1ab606..fd9f39f1eed7fad5ffd009872de310bedf220f23 100644 (file)
@@ -141,7 +141,7 @@ def _randomize(spread, maxn):
     while True:
         if spread == 'gauss':
             ga = abs(random.gauss(0, 1))
     while True:
         if spread == 'gauss':
             ga = abs(random.gauss(0, 1))
-            rv = int(ga*float(maxn)/3)
+            rv = int(ga * float(maxn) / 3)
             if rv < maxn:
                 return rv
         elif spread == 'linear':
             if rv < maxn:
                 return rv
         elif spread == 'linear':
@@ -160,7 +160,7 @@ def generate_new_flow_details(flows=10, switches=1, swspread='gauss', tables=250
     It also returns a dictionary with statsistics."""
     swflows = [_randomize(swspread, switches) for f in range(int(flows))]
     # we have to increse the switch index because mininet start indexing switches from 1 (not 0)
     It also returns a dictionary with statsistics."""
     swflows = [_randomize(swspread, switches) for f in range(int(flows))]
     # we have to increse the switch index because mininet start indexing switches from 1 (not 0)
-    fltables = [(s+1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)]
+    fltables = [(s + 1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)]
     notes = _get_notes(fltables)
     return fltables, notes
 
     notes = _get_notes(fltables)
     return fltables, notes
 
index 11a5c71fecc1e7252e534654337afabea56885fb..dad09ce08b2d8b5a4f4ac5d2f8887fa478c5f868 100644 (file)
@@ -16,7 +16,7 @@ class Topology(object):
         [{u'type': u'OF', u'id': u'00:00:00:00:00:00:00:01'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:02'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:03'}]
         [{u'type': u'OF', u'id': u'00:00:00:00:00:00:00:01'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:02'},
          {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:03'}]
-        ]
+    ]
 
     def __init__(self):
         self.builtin = BuiltIn()
 
     def __init__(self):
         self.builtin = BuiltIn()
index 1ab31f3593f9b4df4044508bc8f21969b3ddd0cb..b3c6020f412443a567b3d3ef313c704777676e6c 100644 (file)
@@ -20,7 +20,7 @@ class Topologynew(object):
         [{u'type': u'MD_SAL', u'id': u'openflow:1'},
          {u'type': u'MD_SAL', u'id': u'openflow:2'},
          {u'type': u'MD_SAL', u'id': u'openflow:3'}]
         [{u'type': u'MD_SAL', u'id': u'openflow:1'},
          {u'type': u'MD_SAL', u'id': u'openflow:2'},
          {u'type': u'MD_SAL', u'id': u'openflow:3'}]
-        ]
+    ]
 
     def __init__(self):
         self.builtin = BuiltIn()
 
     def __init__(self):
         self.builtin = BuiltIn()
@@ -71,7 +71,7 @@ class Topologynew(object):
 
         num_nodes = Common.num_of_nodes(depth, fanout)
         nodelist = []
 
         num_nodes = Common.num_of_nodes(depth, fanout)
         nodelist = []
-        for i in xrange(1, num_nodes+1):
+        for i in xrange(1, num_nodes + 1):
             temp = {"id": "00:00:00:00:00:00:00:%s" % format(i, '02x'), "type": "OF"}
             nodelist.append(temp)
         if int(exceptroot):
             temp = {"id": "00:00:00:00:00:00:00:%s" % format(i, '02x'), "type": "OF"}
             nodelist.append(temp)
         if int(exceptroot):
@@ -87,7 +87,7 @@ class Topologynew(object):
         @return     leafnodes:  list of ids of leaf nodes
         '''
         leafnodes = []
         @return     leafnodes:  list of ids of leaf nodes
         '''
         leafnodes = []
-        self._enumerate_nodes(0, 1, 1, fanout, depth-1, leafnodes)
+        self._enumerate_nodes(0, 1, 1, fanout, depth - 1, leafnodes)
         return leafnodes
 
     def _enumerate_nodes(self, currentdepth, nodeid, currentbranch, fanout, depth, leafnodes):
         return leafnodes
 
     def _enumerate_nodes(self, currentdepth, nodeid, currentbranch, fanout, depth, leafnodes):
@@ -95,8 +95,8 @@ class Topologynew(object):
             leafnodes.append("00:00:00:00:00:00:00:%s" % format(nodeid, '02x'))
             return 1
         nodes = 1
             leafnodes.append("00:00:00:00:00:00:00:%s" % format(nodeid, '02x'))
             return 1
         nodes = 1
-        for i in xrange(1,  fanout+1):
-            nodes += self._enumerate_nodes(currentdepth+1, nodeid+nodes, i, fanout, depth, leafnodes)
+        for i in xrange(1, fanout + 1):
+            nodes += self._enumerate_nodes(currentdepth + 1, nodeid + nodes, i, fanout, depth, leafnodes)
         return nodes
 
 if __name__ == '__main__':
         return nodes
 
 if __name__ == '__main__':
index 7ded295799b52051f35895458a84f5f6d69f69a0..dcd3d662b4651b94223446f95c5a2aa9864e5a90 100644 (file)
@@ -69,11 +69,11 @@ def post(url, userId, password, data):
     if password is None:
         password = 'admin'
 
     if password is None:
         password = 'admin'
 
-    print("post request with url "+url)
-    print("post request with data "+data)
+    print("post request with url " + url)
+    print("post request with data " + data)
     headers = {}
     headers['Content-Type'] = 'application/json'
     headers = {}
     headers['Content-Type'] = 'application/json'
-    # headers['Accept']= 'application/xml'
+    # headers['Accept'] = 'application/xml'
     session = _cache.switch("CLUSTERING_POST")
     resp = session.post(url, data.encode('utf-8'), headers=headers, auth=(userId, password))
 
     session = _cache.switch("CLUSTERING_POST")
     resp = session.post(url, data.encode('utf-8'), headers=headers, auth=(userId, password))
 
@@ -91,7 +91,7 @@ def delete(url, userId='admin', password='admin'):
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
         DeprecationWarning
     )
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
         DeprecationWarning
     )
-    print("delete all resources belonging to url"+url)
+    print("delete all resources belonging to url" + url)
     session = _cache.switch("CLUSTERING_DELETE")
     resp = session.delete(url, auth=(userId, password))  # noqa
 
     session = _cache.switch("CLUSTERING_DELETE")
     resp = session.delete(url, auth=(userId, password))  # noqa
 
@@ -148,7 +148,7 @@ def wait_for_controller_up(ip, port="8181"):
 def startAllControllers(username, password, karafhome, port, *ips):
     # Start all controllers
     for ip in ips:
 def startAllControllers(username, password, karafhome, port, *ips):
     # Start all controllers
     for ip in ips:
-        execute_ssh_command(ip, username, password, karafhome+"/bin/start")
+        execute_ssh_command(ip, username, password, karafhome + "/bin/start")
 
     # Wait for all of them to be up
     for ip in ips:
 
     # Wait for all of them to be up
     for ip in ips:
@@ -170,7 +170,7 @@ def stopcontroller(ip, username, password, karafhome):
 
 
 def executeStopController(ip, username, password, karafhome):
 
 
 def executeStopController(ip, username, password, karafhome):
-    execute_ssh_command(ip, username, password, karafhome+"/bin/stop")
+    execute_ssh_command(ip, username, password, karafhome + "/bin/stop")
 
 
 def stopAllControllers(username, password, karafhome, *ips):
 
 
 def stopAllControllers(username, password, karafhome, *ips):
@@ -224,7 +224,7 @@ def isolate_controller(controllers, username, password, isolated):
     :param isolated: Number (starting at one) of the controller to be isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
     :param isolated: Number (starting at one) of the controller to be isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
-    isolated_controller = controllers[isolated-1]
+    isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -I OUTPUT -p all --source '
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -I OUTPUT -p all --source '
@@ -256,7 +256,7 @@ def rejoin_controller(controllers, username, password, isolated):
     :param isolated: Number (starting at one) of the isolated controller isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
     :param isolated: Number (starting at one) of the isolated controller isolated.
     :return: If successful, returns "pass", otherwise returns the last failed IPTables text.
     """
-    isolated_controller = controllers[isolated-1]
+    isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -D OUTPUT -p all --source '
     for controller in controllers:
         if controller != isolated_controller:
             base_str = 'sudo iptables -D OUTPUT -p all --source '
index 7bd8fff2fdf01835b5dd07721e1f7ca85e88371f..2e5a3ea4830f147b55aabe62f77c765818165bc8 100644 (file)
@@ -284,7 +284,7 @@ class XmlComparator:
             if nodeDict['flow']['priority'] == origDict['flow']['priority']:
                 for p in IGNORED_PATHS_FOR_OC:
                     td = copy.copy(origDict)
             if nodeDict['flow']['priority'] == origDict['flow']['priority']:
                 for p in IGNORED_PATHS_FOR_OC:
                     td = copy.copy(origDict)
-                    _rem_unimplemented_tags(p[0], p[1],  td)
+                    _rem_unimplemented_tags(p[0], p[1], td)
                     for (p, t, v) in TAGS_TO_ADD_FOR_OC:
                         _add_tags(p, t, v, td)
                     for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC:
                     for (p, t, v) in TAGS_TO_ADD_FOR_OC:
                         _add_tags(p, t, v, td)
                     for (p, t, rt) in TAGS_TO_MODIFY_FOR_OC:
index 8f0260750258c02fcd326c1741c14bde1df9708b..36a23c7ce4bff9fae0c751b6c732fa918e625729 100644 (file)
@@ -1565,7 +1565,7 @@ class _BaseV6(object):
         hex_str = '%032x' % ip_int
         hextets = []
         for x in range(0, 32, 4):
         hex_str = '%032x' % ip_int
         hextets = []
         for x in range(0, 32, 4):
-            hextets.append('%x' % int(hex_str[x:x+4], 16))
+            hextets.append('%x' % int(hex_str[x:x + 4], 16))
 
         hextets = self._compress_hextets(hextets)
         return ':'.join(hextets)
 
         hextets = self._compress_hextets(hextets)
         return ':'.join(hextets)
index 64746be533a919bcc9967cca24e3dbe09678f418..2d07a47e643be2bb627fc70abf57ef8494e29a89 100755 (executable)
@@ -22,7 +22,7 @@ class Switch1(Topo):
         switch = self.addSwitch('s1')
         n = 2
         for h in range(n):
         switch = self.addSwitch('s1')
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 1), mac="00:00:00:00:00:0"+str(h+1), ip="10.0.0."+str(h+1))
+            host = self.addHost('h%s' % (h + 1), mac="00:00:00:00:00:0" + str(h + 1), ip="10.0.0." + str(h + 1))
             self.addLink(host, switch)
 
 
             self.addLink(host, switch)
 
 
@@ -33,7 +33,7 @@ class Switch2(Topo):
         switch = self.addSwitch('s2')
         n = 2
         for h in range(n):
         switch = self.addSwitch('s2')
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 3), mac="00:00:00:00:00:0"+str(h+3), ip="10.0.0."+str(h+3))
+            host = self.addHost('h%s' % (h + 3), mac="00:00:00:00:00:0" + str(h + 3), ip="10.0.0." + str(h + 3))
             self.addLink(host, switch)
 
 topos = {'Switch1': (lambda: Switch1()),
             self.addLink(host, switch)
 
 topos = {'Switch1': (lambda: Switch1()),
index a948a1bc0b7cbc4f8456724d51408cc93234679b..a885da5c74a3f6256e164af8a50a5a608502001c 100755 (executable)
@@ -140,7 +140,7 @@ class Deployer:
                                  self.rootdir)
 
     def kill_controller(self):
                                  self.rootdir)
 
     def kill_controller(self):
-        self.remote.copy_file("kill_controller.sh",  self.rootdir + "/")
+        self.remote.copy_file("kill_controller.sh", self.rootdir + "/")
         self.remote.exec_cmd(self.rootdir + "/kill_controller.sh")
 
     def deploy(self):
         self.remote.exec_cmd(self.rootdir + "/kill_controller.sh")
 
     def deploy(self):
@@ -266,10 +266,10 @@ def main():
 
     for x in range(0, 10):
         if len(all_replicas) > args.rf:
 
     for x in range(0, 10):
         if len(all_replicas) > args.rf:
-            replicas["REPLICAS_" + str(x+1)] \
+            replicas["REPLICAS_" + str(x + 1)] \
                 = array_str(random.sample(all_replicas, args.rf))
         else:
                 = array_str(random.sample(all_replicas, args.rf))
         else:
-            replicas["REPLICAS_" + str(x+1)] = array_str(all_replicas)
+            replicas["REPLICAS_" + str(x + 1)] = array_str(all_replicas)
 
     deployers = []
 
 
     deployers = []
 
index 26a9d7a80678acd82a0a7f337bd02ed232d99d8f..4df556bb27e5e5c2a4583ba80d5faf01a99d1fb0 100644 (file)
@@ -131,11 +131,11 @@ for controller in controllers:
 
     # collect shards found in any controller; does not require all controllers to have the same shards
     for localShard in data['value']['LocalShards']:
 
     # collect shards found in any controller; does not require all controllers to have the same shards
     for localShard in data['value']['LocalShards']:
-        shardName = localShard[(localShard.find("-shard-")+7):localShard.find("-config")]
+        shardName = localShard[(localShard.find("-shard-") + 7):localShard.find("-config")]
         Shards.add(shardName)
 print controller_names
 print Shards
         Shards.add(shardName)
 print controller_names
 print Shards
-field_len = max(map(len, Shards))+2
+field_len = max(map(len, Shards)) + 2
 
 stdscr = curses.initscr()
 curses.noecho()
 
 stdscr = curses.initscr()
 curses.noecho()
@@ -155,7 +155,7 @@ curses.init_pair(5, curses.COLOR_BLACK, curses.COLOR_YELLOW)
 for row, controller in enumerate(controller_names):
     stdscr.addstr(row + 1, 0, string.center(controller, field_len), curses.color_pair(1))
 for data_column, shard in enumerate(Shards):
 for row, controller in enumerate(controller_names):
     stdscr.addstr(row + 1, 0, string.center(controller, field_len), curses.color_pair(1))
 for data_column, shard in enumerate(Shards):
-    stdscr.addstr(0, (field_len+1) * (data_column + 1), string.center(shard, field_len), curses.color_pair(1))
+    stdscr.addstr(0, (field_len + 1) * (data_column + 1), string.center(shard, field_len), curses.color_pair(1))
 stdscr.addstr(len(Shards) + 2, 0, 'Press q to quit.', curses.color_pair(1))
 stdscr.refresh()
 
 stdscr.addstr(len(Shards) + 2, 0, 'Press q to quit.', curses.color_pair(1))
 stdscr.refresh()
 
@@ -170,12 +170,12 @@ while key != ord('q') and key != ord('Q'):
         cluster_stat = getClusterRolesWithCurl(shard_name, controllers, controller_names)
         for row, controller in enumerate(controllers):
             status = size_and_color(cluster_stat, field_len, controller)
         cluster_stat = getClusterRolesWithCurl(shard_name, controllers, controller_names)
         for row, controller in enumerate(controllers):
             status = size_and_color(cluster_stat, field_len, controller)
-            stdscr.addstr(row + 1, (field_len+1) * (data_column + 1), status['txt'], status['color'])
+            stdscr.addstr(row + 1, (field_len + 1) * (data_column + 1), status['txt'], status['color'])
     time.sleep(0.5)
     if odd_or_even % 2 == 0:
     time.sleep(0.5)
     if odd_or_even % 2 == 0:
-        stdscr.addstr(0, field_len/2 - 2, " <3 ", curses.color_pair(5))
+        stdscr.addstr(0, field_len / 2 - 2, " <3 ", curses.color_pair(5))
     else:
     else:
-        stdscr.addstr(0, field_len/2 - 2, " <3 ", curses.color_pair(0))
+        stdscr.addstr(0, field_len / 2 - 2, " <3 ", curses.color_pair(0))
     stdscr.refresh()
 
 # clean up
     stdscr.refresh()
 
 # clean up
index a57c64b7eeb2fb3d7d7676db263475c2aae0f39c..a068beee5bc0f7cee07d7df785f66585e56684c5 100755 (executable)
@@ -185,7 +185,7 @@ def get_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
             if "prefix" in item:
                 prefixes += item + ","
                 prefix_count += 1
             if "prefix" in item:
                 prefixes += item + ","
                 prefix_count += 1
-        prefixes = prefixes[:len(prefixes)-1]
+        prefixes = prefixes[:len(prefixes) - 1]
         logger.debug("prefix_list=%s", prefixes)
         logger.info("prefix_count=%s", prefix_count)
 
         logger.debug("prefix_list=%s", prefixes)
         logger.info("prefix_count=%s", prefix_count)
 
index 5551bc8df1480839bdd63afdc4ccb388041dd913..f63155fcf25975d9a5da8c6527f7e64650bbd8bd 100755 (executable)
@@ -1360,13 +1360,13 @@ class ReadTracker(object):
                 logger.debug("withdrawn_prefix_received: %s", prefix)
             # total path attribute length
             total_pa_length_offset = 21 + wdr_length
                 logger.debug("withdrawn_prefix_received: %s", prefix)
             # total path attribute length
             total_pa_length_offset = 21 + wdr_length
-            total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset+2]
+            total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset + 2]
             total_pa_length = int(binascii.b2a_hex(total_pa_length_hex), 16)
             logger.debug("Total path attribute lenght: 0x%s (%s)",
                          binascii.b2a_hex(total_pa_length_hex), total_pa_length)
             # path attributes
             pa_offset = total_pa_length_offset + 2
             total_pa_length = int(binascii.b2a_hex(total_pa_length_hex), 16)
             logger.debug("Total path attribute lenght: 0x%s (%s)",
                          binascii.b2a_hex(total_pa_length_hex), total_pa_length)
             # path attributes
             pa_offset = total_pa_length_offset + 2
-            pa_hex = msg[pa_offset:pa_offset+total_pa_length]
+            pa_hex = msg[pa_offset:pa_offset + total_pa_length]
             logger.debug("Path attributes: 0x%s", binascii.b2a_hex(pa_hex))
             self.decode_path_attributes(pa_hex)
             # network layer reachability information length
             logger.debug("Path attributes: 0x%s", binascii.b2a_hex(pa_hex))
             self.decode_path_attributes(pa_hex)
             # network layer reachability information length
@@ -1374,7 +1374,7 @@ class ReadTracker(object):
             logger.debug("Calculated NLRI length: %s", nlri_length)
             # network layer reachability information
             nlri_offset = pa_offset + total_pa_length
             logger.debug("Calculated NLRI length: %s", nlri_length)
             # network layer reachability information
             nlri_offset = pa_offset + total_pa_length
-            nlri_hex = msg[nlri_offset:nlri_offset+nlri_length]
+            nlri_hex = msg[nlri_offset:nlri_offset + nlri_length]
             logger.debug("NLRI: 0x%s", binascii.b2a_hex(nlri_hex))
             nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
             logger.debug("NLRI prefix list: %s", nlri_prefix_list)
             logger.debug("NLRI: 0x%s", binascii.b2a_hex(nlri_hex))
             nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
             logger.debug("NLRI prefix list: %s", nlri_prefix_list)
index cac22dd203f9cc01d5b71864ca43111ddbcd8378..7ef251b1170593bb222d118e2e884bdbb9ba9a06 100644 (file)
@@ -171,7 +171,7 @@ while request_count > 0:
     if len(responses) > 0:
         result = responses.popleft()
         if result[0] is None:
     if len(responses) > 0:
         result = responses.popleft()
         if result[0] is None:
-            print "ERROR|" + result[1]+"|"
+            print "ERROR|" + result[1] + "|"
             break
         runtime = "%5.3f|%5.3f|%5.3f" % result[1]
         print "%03d|%s|%s|" % (result[0], runtime, result[2])
             break
         runtime = "%5.3f|%5.3f|%5.3f" % result[1]
         print "%03d|%s|%s|" % (result[0], runtime, result[2])
index 3e099fbeb1aec7dd94f67fc6b37a5906788a9976..76d459092c4ff9e44c5794a0b68a18d8ae1e037a 100755 (executable)
@@ -30,7 +30,7 @@ def generate_eids_random(base, n):
     eids = []
     for i in range(0, n):
         eids.append(str(netaddr.IPAddress(base) +
     eids = []
     for i in range(0, n):
         eids.append(str(netaddr.IPAddress(base) +
-                        random.randint(0, (n-1)*increment)))
+                        random.randint(0, (n - 1) * increment)))
     return eids
 
 
     return eids
 
 
@@ -44,7 +44,7 @@ def generate_eids_sequential(base, n):
     """
     eids = []
     for i in range(0, n):
     """
     eids = []
     for i in range(0, n):
-        eids.append(str(netaddr.IPAddress(base) + i*increment))
+        eids.append(str(netaddr.IPAddress(base) + i * increment))
     return eids
 
 
     return eids
 
 
index 4f8124995ea6bec9338d45bce563109ff8ae7c51..0e1f66a7cd7d12a8ebc9f9c2182d71a3b7418832 100644 (file)
@@ -11,7 +11,7 @@ __email__ = "syedbahm@cisco.com"
 
 def addCar(numberOfCars):
     """Creates the specified number of cars based on Cars yang model using RESTCONF"""
 
 def addCar(numberOfCars):
     """Creates the specified number of cars based on Cars yang model using RESTCONF"""
-    for x in range(1, numberOfCars+1):
+    for x in range(1, numberOfCars + 1):
         strId = str(x)
         payload = settings.add_car_payload_template.substitute(
             id=strId, category="category" + strId, model="model" + strId,
         strId = str(x)
         payload = settings.add_car_payload_template.substitute(
             id=strId, category="category" + strId, model="model" + strId,
@@ -50,7 +50,7 @@ def addPerson(numberOfPersons):
         return
 
     genderToggle = "Male"
         return
 
     genderToggle = "Male"
-    for x in range(1, numberOfPersons+1):
+    for x in range(1, numberOfPersons + 1):
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
         if(genderToggle == "Male"):
             genderToggle = "Female"
         else:
index 04a9fc3a80f6200e64b20c935073a5bf1e3431df..5bad661eb175ac84e5092bae4194a872071aea86 100755 (executable)
@@ -30,7 +30,7 @@ def monitor_stats(crawler, monitortime, period):
         yield (actualtime, crawler.nodes, crawler.reported_flows, crawler.found_flows)
         if actualtime > basetime + monitortime:
             break
         yield (actualtime, crawler.nodes, crawler.reported_flows, crawler.found_flows)
         if actualtime > basetime + monitortime:
             break
-        time.sleep(period-get_time_delta(actualtime, lastcrawl))
+        time.sleep(period - get_time_delta(actualtime, lastcrawl))
 
 
 if __name__ == "__main__":
 
 
 if __name__ == "__main__":
index 1309fefab78b190398e97800e5123cd90701631b..289624ddd5a4243b7400a258c2a23c6862b2a62c 100644 (file)
@@ -84,12 +84,12 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
                 results[r.status_code] = 1
 
     total = sum(results.values())
                 results[r.status_code] = 1
 
     total = sum(results.values())
-    rate = total/t.secs
+    rate = total / t.secs
     total_requests.increment(total)
     total_req_rate.increment(rate)
 
     total_requests.increment(total)
     total_req_rate.increment(rate)
 
-    mbytes = total_len / (1024*1024)
-    mrate = mbytes/t.secs
+    mbytes = total_len / (1024 * 1024)
+    mrate = mbytes / t.secs
     total_mbytes.increment(mbytes)
     total_mb_rate.increment(mrate)
 
     total_mbytes.increment(mbytes)
     total_mb_rate.increment(mrate)
 
index 7a6d060a964ba8648b3a0bd9111eba173d1253d7..27c703362b70ed5f5daf46eb6ca43fec5820d5e7 100644 (file)
@@ -262,10 +262,10 @@ def _task_executor(preparing_function, odl_ip="127.0.0.1", port="8181",
     hosts = odl_ip.split(',')
     nrhosts = len(hosts)
 
     hosts = odl_ip.split(',')
     nrhosts = len(hosts)
 
-    items = [i+1 for i in range(item_count)]
+    items = [i + 1 for i in range(item_count)]
     item_groups = []
     for i in range(0, item_count, items_per_request):
     item_groups = []
     for i in range(0, item_count, items_per_request):
-        item_groups.append(items[i:i+items_per_request])
+        item_groups.append(items[i:i + items_per_request])
 
     # fill the queue with details needed for one http requests
     send_queue = Queue.Queue()
 
     # fill the queue with details needed for one http requests
     send_queue = Queue.Queue()
index 7d46757ef5a886b744470f135f8e6f6f20cce17f..3e5b7cce9f792902f9bec33576db21c8b7b29462 100644 (file)
@@ -107,7 +107,7 @@ class OvsdbConfigBlaster (object):
             vswitch_name: {
                 'name': vswitch_name,
                 'ip': vswitch_ip,
             vswitch_name: {
                 'name': vswitch_name,
                 'ip': vswitch_ip,
-                'remote-ip':  vswitch_remote_ip,
+                'remote-ip': vswitch_remote_ip,
                 'ovsdb-port': vswitch_ovsdb_port,
                 'node-id': 'ovsdb://%s:%s'
                            % (vswitch_ip,
                 'ovsdb-port': vswitch_ovsdb_port,
                 'node-id': 'ovsdb://%s:%s'
                            % (vswitch_ip,
@@ -240,7 +240,7 @@ class OvsdbConfigBlaster (object):
                             ],
                             u"vlan-mode": u"access"
                         }
                             ],
                             u"vlan-mode": u"access"
                         }
-                        ]
+                    ]
                 },
                     # TODO add port-body
                     'port-body': {}}
                 },
                     # TODO add port-body
                     'port-body': {}}
index deb8c78d58dc9a857041b3145b4e81762c774ebc..2065064e82a998d23086315cd63f88ef65909b5a 100755 (executable)
@@ -332,7 +332,7 @@ elif args.graphs:
 else:
     graphs_to_build = []
 for graph, graph_num in zip(graphs_to_build, range(len(graphs_to_build))):
 else:
     graphs_to_build = []
 for graph, graph_num in zip(graphs_to_build, range(len(graphs_to_build))):
-    graph_map[graph](len(graphs_to_build), graph_num+1)
+    graph_map[graph](len(graphs_to_build), graph_num + 1)
 
 # Compute stats
 if args.all_stats:
 
 # Compute stats
 if args.all_stats:
diff --git a/tox.ini b/tox.ini
index dac05bf5e7aaf826b461e713a2e090c3cb46b833..85f46013a6c35c44e14aafdea4eaf6756084b407 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -12,3 +12,13 @@ commands = flake8
 [flake8]
 show-source = True
 max-line-length = 120
 [flake8]
 show-source = True
 max-line-length = 120
+
+# - Select E121, E123, E126, E226, E241, E242 and E704 which are turned OFF
+#   by default but represent guidelines accepted by us.
+# - Do not select E133 because it is incompatible with E123 which was
+#   selected instead.
+# - It turns out that now all checks except E133 are enabled so the select
+#   can be simplified to be just "E,W". However a new version could change
+#   that (select E133 instead of E123) but that should be caught by the
+#   verify job.
+select = E,W