Auto-generated patch by python-black
author    Thanh Ha <zxiiro@gmail.com>
          Fri, 27 Mar 2020 19:11:25 +0000 (15:11 -0400)
committer Thanh Ha <zxiiro@gmail.com>
          Tue, 11 Aug 2020 15:55:32 +0000 (11:55 -0400)
Refer to the next patch in the series before merging this one. This
change also removes autopep8 in preparation for python-black, as the
two tools conflict with each other.
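
For illustration (a minimal sketch, not taken from this repo):
autopep8 only fixes PEP 8 violations and leaves stylistic choices
such as quote characters alone, while black rewrites them wholesale,
so with both hooks enabled a file can bounce between two styles.

    pobject, field = "{}", "name"
    # Accepted unchanged by autopep8 (PEP 8 clean, single quotes):
    args = {'jsonblob': pobject, 'field': field}
    # The same line after black, which normalizes strings to double quotes:
    args = {"jsonblob": pobject, "field": field}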

Change-Id: I8adbeea0d42827e9dc341b023e79da2ce389d491
Signed-off-by: Thanh Ha <zxiiro@gmail.com>
134 files changed:
.flake8
.pre-commit-config.yaml
csit/libraries/AAAJsonUtils.py
csit/libraries/ALTO/AltoParser.py
csit/libraries/Appenders/ElasticsearchAppender.py
csit/libraries/AuthStandalone.py
csit/libraries/BGPCEP/ipaddr.py
csit/libraries/BgpRpcClient.py
csit/libraries/CapwapLibrary.py
csit/libraries/ClusterStateLibrary.py
csit/libraries/Common.py
csit/libraries/ConfGen.py
csit/libraries/Counter.py
csit/libraries/CrudLibrary.py
csit/libraries/Docker.py
csit/libraries/DynamicMininet.py
csit/libraries/FlowLib.py
csit/libraries/GbpSxp.py
csit/libraries/IoTDM/ciotdm.py
csit/libraries/IoTDM/client_libs/iot_data_concepts.py
csit/libraries/IoTDM/client_libs/iotdm_it_test_com.py
csit/libraries/IoTDM/client_libs/onem2m_http.py
csit/libraries/IoTDM/client_libs/onem2m_json_primitive.py
csit/libraries/IoTDM/client_libs/onem2m_primitive.py
csit/libraries/IoTDM/client_libs/testing/test_onem2m_http.py
csit/libraries/IoTDM/client_libs/testing/test_onem2m_json_primitive.py
csit/libraries/IoTDM/criotdm.py
csit/libraries/IoTDM/iotdm.py
csit/libraries/IoTDM/iotdm_comm.py
csit/libraries/IoTDM/riotdm.py
csit/libraries/JsonGenerator.py
csit/libraries/MdsalLowlevelPy.py
csit/libraries/MininetTopo/create_fullymesh.py
csit/libraries/MininetTopo/topo-3sw-2host_multipath.py
csit/libraries/MininetTopo/vlan_vtn_test.py
csit/libraries/SFC/SfcUtils.py
csit/libraries/ScaleClient.py
csit/libraries/SettingsLibrary.py
csit/libraries/SwitchClasses/BaseSwitch.py
csit/libraries/SwitchClasses/H3C.py
csit/libraries/SwitchClasses/H3C_5920.py
csit/libraries/SwitchClasses/Ovs.py
csit/libraries/SwitchClasses/ProVision.py
csit/libraries/SwitchClasses/ProVision_3800.py
csit/libraries/SwitchManager.py
csit/libraries/Sxp.py
csit/libraries/Topology.py
csit/libraries/Topologynew.py
csit/libraries/UtilLibrary.py
csit/libraries/VsctlListParser.py
csit/libraries/XmlComparator.py
csit/libraries/backuprestore/JsonDiffTool.py
csit/libraries/backuprestore/backuprestoretest.py
csit/libraries/backuprestore/jsonpathl.py
csit/libraries/netvirt/excepts.py
csit/libraries/norm_json.py
csit/scripts/data_generate.py
csit/scripts/generate_dashVis.py
csit/scripts/generate_searchSourceJSON.py
csit/scripts/generate_uiStateJSON.py
csit/scripts/generate_visState.py
csit/scripts/push_dashboard.py
csit/scripts/push_test_data.py
csit/suites/groupbasedpolicy/GBP/3-node/gbp1/init_scripts/infrastructure_config.py
csit/suites/groupbasedpolicy/GBP/3-node/gbp2-multitenant/init_scripts/infrastructure_config.py
csit/suites/groupbasedpolicy/GBPSFC/6-node/demo-asymmetric-chain/init_scripts/infrastructure_config.py
csit/suites/groupbasedpolicy/GBPSFC/6-node/demo-symmetric-chain/init_scripts/infrastructure_config.py
csit/suites/groupbasedpolicy/common_scripts/infrastructure_launch.py
csit/suites/l2switch/topologies/customtopo.py
csit/suites/lacp/Lacp_Feature_OF13/LACP_custom1.py
csit/suites/vpnservice/custom.py
csit/variables/Variables.py
csit/variables/alto/Variables.py
csit/variables/genius/Modules.py
csit/variables/jsonrpc/odl-jsonrpc-test-read
csit/variables/netvirt/Modules.py
csit/variables/ocpplugin/Variables.py
csit/variables/ofplugin/RpcVariables.py
csit/variables/ovsdb/ovsdb.py
csit/variables/pcepuser/variables.py
csit/variables/sfc/Modules.py
csit/variables/tcpmd5user/variables.py
csit/variables/topoprocessing/TargetFields.py
csit/variables/topoprocessing/Topologies.py
csit/variables/topoprocessing/TopologyRequests.py
csit/variables/vpnservice/Variables.py
csit/variables/vpnservice/configureSwitches.py
csit/variables/vtn/Modules.py
docs/conf.py
tools/clustering/cluster-debugging/transaction-tracking/process.py
tools/clustering/cluster-deployer/deploy.py
tools/clustering/cluster-deployer/remote_host.py
tools/clustering/cluster-deployer/restart.py
tools/clustering/cluster-tools/cluster_check.py
tools/distchanges/changes.py
tools/distchanges/distcompare.py
tools/distchanges/gerritquery.py
tools/distchanges/logg.py
tools/distchanges/tests/test_changes.py
tools/distchanges/tests/test_gerritquery.py
tools/exabgp_files/exarpc.py
tools/fastbgp/bgp_app_peer.py
tools/fastbgp/play.py
tools/mdsal_benchmark/dsbenchmark.py
tools/mdsal_benchmark/ntfbenchmark.py
tools/mdsal_benchmark/rpcbenchmark.py
tools/netconf_tools/configurer.py
tools/netconf_tools/getter.py
tools/odl-lispflowmapping-performance-tests/create_lisp_control_plane_pcap.py
tools/odl-lispflowmapping-performance-tests/mapping_blaster.py
tools/odl-mdsal-clustering-tests/clustering-functional-test/crud.py
tools/odl-mdsal-clustering-tests/clustering-functional-test/settings.py
tools/odl-mdsal-clustering-tests/clustering-functional-test/util.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/create_plot_data_files.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_bulk.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster_fle.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_stats_stability_monitor.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_read_blaster.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/odl_tester.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_stats.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/onos_tester.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/pretty_print.py
tools/odl-mdsal-clustering-tests/clustering-performance-test/shard_perf_test.py
tools/odl-mdsal-clustering-tests/replace_cars.py
tools/odl-mdsal-clustering-tests/scripts/cluster_rest_script.py
tools/odl-ovsdb-performance-tests/ovsdbconfigblaster.py
tools/pcep_updater/updater.py
tools/wcbench/stats.py
tools/wstools/wsreceiver.py

diff --git a/.flake8 b/.flake8
index 5dca78bc9beae9529d42f634285f5fc47bfba039..5071590ef2fec22a90bece36e3effddaf9165524 100644 (file)
--- a/.flake8
+++ b/.flake8
@@ -11,8 +11,9 @@ max-line-length = 120
 #   that (select E133 instead of E123) but that should be caught by the
 #   verify job.
 select = E,W
-ignore = E722,W503 # as of aprox 10/25 some update/change has caused existing code to fail on E722
+
+# E203,E501,W503 are disabled as recommended by python-black.
+ignore = E203,E501,W503
 exclude =
     .git,
-    .tox,
-    docs/conf.py
+    .tox
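
For context (an illustrative sketch, not part of this patch): E203 and
W503 are ignored because black deliberately produces code that those
two pycodestyle rules flag, and E501 is ignored because black already
manages line length itself (and may leave e.g. long strings intact).

    data = list(range(16))
    offset, size = 2, 4

    # black keeps spaces around ":" when a slice bound is a compound
    # expression; pycodestyle reports this as E203 (whitespace before ':').
    chunk = data[offset + 1 : offset + size]

    # For over-long expressions black breaks before binary operators;
    # pycodestyle reports this as W503 (line break before binary operator).
    total = (
        sum(chunk)
        + len(data)
    )
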
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 64886a158c9afdac2a7edb73a2afb334fff4ad5d..efc1f7cfe5396e5ca718545254cdb660c521300c 100644 (file)
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,11 +11,6 @@ repos:
         # so to prevent this conflict we will ignore .robot files in this trailing-whitespace hook
         exclude: '\.robot'
 
-  - repo: https://github.com/pre-commit/mirrors-autopep8
-    rev: v1.4.4
-    hooks:
-      - id: autopep8
-
   - repo: https://github.com/guykisel/pre-commit-robotframework-tidy
     rev: master
     hooks:
diff --git a/csit/libraries/AAAJsonUtils.py b/csit/libraries/AAAJsonUtils.py
index d0000705fe8bcd3d28e86c912f35b5b1eea79b57..750487abedca66e4c94657c75613c82a61d8841c 100644 (file)
--- a/csit/libraries/AAAJsonUtils.py
+++ b/csit/libraries/AAAJsonUtils.py
@@ -27,15 +27,15 @@ def countnodes(args):
     ctr = 0
 
     try:
-        jsonobj = json.loads(args['jsonblob'])
+        jsonobj = json.loads(args["jsonblob"])
     except KeyError:
         print("countnodes: json blob to parse not found")
         raise
 
-    if 'subnode' in args:
+    if "subnode" in args:
         ctr = len(jsonobj)
-    elif 'category' in args:
-        category_ = args['category'].encode('ascii', 'ignore')
+    elif "category" in args:
+        category_ = args["category"].encode("ascii", "ignore")
         ctr = len(jsonobj[category_])
     else:
         # working with a single record, short-cut and return count of 1
@@ -64,7 +64,7 @@ def fieldcount(pobject, field):
         :returns number_nodes: the correct number of fields you counted
         in the json
     """
-    number_nodes = countnodes({'jsonblob': pobject, 'field': field})
+    number_nodes = countnodes({"jsonblob": pobject, "field": field})
     return number_nodes
 
 
@@ -97,7 +97,7 @@ def subnodecount(pobject, subnode):
         :returns number_nodes: the correct number of fields you counted
         in the json
     """
-    number_nodes = countnodes({'jsonblob': pobject, 'subnode': subnode})
+    number_nodes = countnodes({"jsonblob": pobject, "subnode": subnode})
     return number_nodes
 
 
@@ -135,8 +135,7 @@ def nodecount(pobject, category, node):
         :returns number_nodes: the correct number of fields you counted
         in the json
     """
-    number_nodes = \
-        countnodes({'jsonblob': pobject, 'category': category, 'node': node})
+    number_nodes = countnodes({"jsonblob": pobject, "category": category, "node": node})
     return number_nodes
 
 
@@ -157,32 +156,32 @@ def get_id_by_name(args):
         :returns nodelist: return the first id that has same corresponding name
     """
     try:
-        jsonobj = json.loads(str(args['jsonblob']))
+        jsonobj = json.loads(str(args["jsonblob"]))
     except KeyError:
         print("get_id_by_name: json blob not specified:")
         raise
 
     try:
-        name = args['name']
+        name = args["name"]
     except KeyError:
         print("get_id_by_name: name [usr, domain, role] not specified in args")
         raise
 
-    if 'head' in args:
-        blobkey = args['head']
+    if "head" in args:
+        blobkey = args["head"]
     else:
         # use an empty key when the arg is not specified.  deals with simpler
         # form
-        blobkey = ''
+        blobkey = ""
 
     try:
-        datatype = args['typeval']
+        datatype = args["typeval"]
     except KeyError:
         print("get_id_by_name: need a type arg to process correct name for id")
         raise
 
     try:
-        ncount = args['size']
+        ncount = args["size"]
     except KeyError:
         raise
 
@@ -193,12 +192,12 @@ def get_id_by_name(args):
     if ncount > 0:
         for i in range(ncount):
             # build up some 'lookup' keys, call jsonpath with that key
-            bkey1 = '$.' + blobkey + '[' + str(i) + '].name'
-            typename = datatype + 'id'
-            bkey2 = '$.' + blobkey + '[' + str(i) + '].' + typename
+            bkey1 = "$." + blobkey + "[" + str(i) + "].name"
+            typename = datatype + "id"
+            bkey2 = "$." + blobkey + "[" + str(i) + "]." + typename
 
             # find records with same name
-            name_record = jsonobj[blobkey][i]['name']
+            name_record = jsonobj[blobkey][i]["name"]
             # find corresponding node info, for that name
             node_record = jsonobj[blobkey][i][typename]
 
@@ -229,48 +228,48 @@ def get_attribute_by_id(args):
         to the provided id
     """
     try:
-        jsonobj = json.loads(args['jsonblob'])
+        jsonobj = json.loads(args["jsonblob"])
     except KeyError:
         print("get_attribute_by_id: json blob not specified:")
         raise
 
     try:
-        nodeid = args['id']
+        nodeid = args["id"]
     except KeyError:
         print("get_attribute_by_id: id to look for not specified in parameters")
         raise
 
-    if 'attr' in args:
-        attr = args['attr']
+    if "attr" in args:
+        attr = args["attr"]
     else:
         # If caller does not specify a record attribute to return, then
         # simply default to giving the description of the id you are
         # searching on
-        attr = 'description'
+        attr = "description"
 
-    if 'head' in args:
+    if "head" in args:
         # will be one of roles, users, domains, or empty to process more
         # specific grouping of json data
-        blobkey = args['head']
+        blobkey = args["head"]
     else:
         # use an empty key when the arg is not specified, allows us to
         # process chunk of JSON without the outer layer defining roles,
         # users, domains. (simpler format)
-        blobkey = ''
+        blobkey = ""
 
     try:
-        datatype = args['typeval']
+        datatype = args["typeval"]
     except KeyError:
         print("get_attribute_by_id: need type arg to process name for id")
         raise
 
     try:
-        size = args['size']
+        size = args["size"]
     except KeyError:
         print("get_attribute_by_id: specify number of records we need")
         raise
 
-    typename = datatype + 'id'
+    typename = datatype + "id"
 
     # Loop through the records looking for the nodeid, when found, return
     # the corresponding attribute value
@@ -280,10 +279,10 @@ def get_attribute_by_id(args):
         for i in range(ncount):
 
             try:
-                name_record = jsonobj[blobkey][i]['name']
+                name_record = jsonobj[blobkey][i]["name"]
                 node_record = jsonobj[blobkey][i][typename]
             except Exception:
-                name_record = jsonobj['name']
+                name_record = jsonobj["name"]
                 node_record = jsonobj[typename]
 
             if nodeid == node_record:
@@ -312,11 +311,15 @@ def get_role_id_by_rolename(pobject, rolename, number_nodes):
         :returns roleid:  a list of one or more roleid's that match
         the rolename given
     """
-    roleid = get_id_by_name({'jsonblob': pobject,
-                             'name': rolename,
-                             'head': 'roles',
-                             'size': number_nodes,
-                             'typeval': 'role'})
+    roleid = get_id_by_name(
+        {
+            "jsonblob": pobject,
+            "name": rolename,
+            "head": "roles",
+            "size": number_nodes,
+            "typeval": "role",
+        }
+    )
     try:
         roleid
     except Exception:
@@ -347,12 +350,16 @@ def get_role_name_by_roleid(pobject, roleid, number_nodes):
         :returns rolename:  the role name that corresponds to the record
         identified by the role-id
     """
-    rolename = get_attribute_by_id({'jsonblob': pobject,
-                                    'head': 'roles',
-                                    'id': roleid,
-                                    'attr': 'name',
-                                    'size': number_nodes,
-                                    'typeval': 'role'})
+    rolename = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "roles",
+            "id": roleid,
+            "attr": "name",
+            "size": number_nodes,
+            "typeval": "role",
+        }
+    )
     try:
         rolename
     except Exception:
@@ -383,12 +390,16 @@ def get_role_description_by_roleid(pobject, roleid, number_nodes):
         :returns roledesc:  the role description that corresponds to the record
         identified by the role-id
     """
-    roledesc = get_attribute_by_id({'jsonblob': pobject,
-                                    'head': 'roles',
-                                    'id': roleid,
-                                    'attr': 'description',
-                                    'size': number_nodes,
-                                    'typeval': 'role'})
+    roledesc = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "roles",
+            "id": roleid,
+            "attr": "description",
+            "size": number_nodes,
+            "typeval": "role",
+        }
+    )
     try:
         roledesc
     except Exception:
@@ -420,11 +431,15 @@ def get_domain_id_by_domainname(pobject, domainname, number_nodes):
         :returns domainid:  a list of one or more domain-id's that match
         the domain-name given
     """
-    domainid = get_id_by_name({'jsonblob': pobject,
-                               'head': 'domains',
-                               'name': domainname,
-                               'size': number_nodes,
-                               'typeval': 'domain'})
+    domainid = get_id_by_name(
+        {
+            "jsonblob": pobject,
+            "head": "domains",
+            "name": domainname,
+            "size": number_nodes,
+            "typeval": "domain",
+        }
+    )
 
     try:
         domainid
@@ -457,12 +472,16 @@ def get_domain_name_by_domainid(pobject, domainid, number_nodes):
         :returns domainname:  the domain name that corresponds to the record
         identified by the domainid
     """
-    domainname = get_attribute_by_id({'jsonblob': pobject,
-                                      'head': 'domains',
-                                      'id': domainid,
-                                      'attr': 'name',
-                                      'size': number_nodes,
-                                      'typeval': 'domain'})
+    domainname = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "domains",
+            "id": domainid,
+            "attr": "name",
+            "size": number_nodes,
+            "typeval": "domain",
+        }
+    )
     try:
         domainname
     except Exception:
@@ -494,12 +513,16 @@ def get_domain_description_by_domainid(pobject, domainid, number_nodes):
         :returns domainname:  the domain description field that corresponds
         to the record identified by the domainid
     """
-    domaindesc = get_attribute_by_id({'jsonblob': pobject,
-                                      'head': 'domains',
-                                      'id': domainid,
-                                      'attr': 'description',
-                                      'size': number_nodes,
-                                      'typeval': 'domain'})
+    domaindesc = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "domains",
+            "id": domainid,
+            "attr": "description",
+            "size": number_nodes,
+            "typeval": "domain",
+        }
+    )
     try:
         domaindesc
     except Exception:
@@ -531,12 +554,16 @@ def get_domain_state_by_domainid(pobject, domainid, number_nodes):
         :returns domainstate:  the domain state (enabled) field that
         corresponds to the record identified by the domainid
     """
-    domainstate = get_attribute_by_id({'jsonblob': pobject,
-                                       'head': 'domains',
-                                       'id': domainid,
-                                       'attr': 'enabled',
-                                       'size': number_nodes,
-                                       'typeval': 'domain'})
+    domainstate = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "domains",
+            "id": domainid,
+            "attr": "enabled",
+            "size": number_nodes,
+            "typeval": "domain",
+        }
+    )
     try:
         domainstate
     except Exception:
@@ -571,11 +598,15 @@ def get_user_id_by_username(pobject, username, number_nodes):
         :returns userid:  a list of one or more user-id's that match
         the username given
     """
-    userid = get_id_by_name({'jsonblob': pobject,
-                             'name': username,
-                             'head': 'users',
-                             'size': number_nodes,
-                             'typeval': 'user'})
+    userid = get_id_by_name(
+        {
+            "jsonblob": pobject,
+            "name": username,
+            "head": "users",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         userid
     except Exception:
@@ -610,12 +641,16 @@ def get_user_password_by_userid(pobject, userid, number_nodes):
         :returns userpassword:  the raw password field that corresponds to
          the record identified by the userid
     """
-    userpassword = get_attribute_by_id({'jsonblob': pobject,
-                                        'head': 'users',
-                                        'id': userid,
-                                        'attr': 'password',
-                                        'size': number_nodes,
-                                        'typeval': 'user'})
+    userpassword = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "users",
+            "id": userid,
+            "attr": "password",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         userpassword
     except Exception:
@@ -650,12 +685,16 @@ def get_user_name_by_userid(pobject, userid, number_nodes):
         :returns username:  the name field that corresponds to the record
         identified by the userid
     """
-    username = get_attribute_by_id({'jsonblob': pobject,
-                                    'head': 'users',
-                                    'id': userid,
-                                    'attr': 'name',
-                                    'size': number_nodes,
-                                    'typeval': 'user'})
+    username = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "users",
+            "id": userid,
+            "attr": "name",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         username
     except Exception:
@@ -690,12 +729,16 @@ def get_user_state_by_userid(pobject, userid, number_nodes):
         :returns userstate:  the enabled field that corresponds to the record
         identified by the userid
     """
-    userstate = get_attribute_by_id({'jsonblob': pobject,
-                                     'head': 'users',
-                                     'id': userid,
-                                     'attr': 'enabled',
-                                     'size': number_nodes,
-                                     'typeval': 'user'})
+    userstate = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "users",
+            "id": userid,
+            "attr": "enabled",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         userstate
     except Exception:
@@ -730,12 +773,16 @@ def get_user_email_by_userid(pobject, userid, number_nodes):
         :returns useremail:  the email field that corresponds to the record
         identified by the userid
     """
-    useremail = get_attribute_by_id({'jsonblob': pobject,
-                                     'head': 'users',
-                                     'id': userid,
-                                     'attr': 'email',
-                                     'size': number_nodes,
-                                     'typeval': 'user'})
+    useremail = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "users",
+            "id": userid,
+            "attr": "email",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         useremail
     except Exception:
@@ -770,12 +817,16 @@ def get_user_description_by_userid(pobject, userid, number_nodes):
         :returns userdesc:  the description field that corresponds to the
         record identified by the userid
     """
-    userdesc = get_attribute_by_id({'jsonblob': pobject,
-                                    'head': 'users',
-                                    'id': userid,
-                                    'attr': 'description',
-                                    'size': number_nodes,
-                                    'typeval': 'user'})
+    userdesc = get_attribute_by_id(
+        {
+            "jsonblob": pobject,
+            "head": "users",
+            "id": userid,
+            "attr": "description",
+            "size": number_nodes,
+            "typeval": "user",
+        }
+    )
     try:
         userdesc
     except Exception:
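
A note on the shape of the rewrites above (a minimal sketch of black's
"magic trailing comma", with hypothetical values): a call or literal
that does not fit on one line is exploded one element per line with a
trailing comma added, and once that comma is present the construct
stays exploded even if it would fit again.

    # Trailing comma present: black keeps the literal exploded.
    point = {
        "x": 1,
        "y": 2,
    }

    # No trailing comma and the line fits: black collapses it.
    point = {"x": 1, "y": 2}
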
diff --git a/csit/libraries/ALTO/AltoParser.py b/csit/libraries/ALTO/AltoParser.py
index 0174ae06fafacce72c6c2ca384e80d24b232312d..86e5701af3a70bba92fd828019afb9910a489e2c 100644 (file)
--- a/csit/libraries/ALTO/AltoParser.py
+++ b/csit/libraries/ALTO/AltoParser.py
@@ -9,17 +9,18 @@ import re
 content_key_set = {"meta", "resources"}
 resource_key_set = {"uri", "media-type", "accepts", "capabilities", "uses"}
 cost_type_key_set = {"cost-mode", "cost-metric", "description"}
-media_type_set = {"application/alto-directory+json",
-                  "application/alto-networkmap+json",
-                  "application/alto-networkmapfilter+json",
-                  "application/alto-costmap+json",
-                  "application/alto-costmapfilter+json",
-                  "application/alto-endpointprop+json",
-                  "application/alto-endpointpropparams+json",
-                  "application/alto-endpointcost+json",
-                  "application/alto-endpointcostparams+json",
-                  "application/alto-error+json"
-                  }
+media_type_set = {
+    "application/alto-directory+json",
+    "application/alto-networkmap+json",
+    "application/alto-networkmapfilter+json",
+    "application/alto-costmap+json",
+    "application/alto-costmapfilter+json",
+    "application/alto-endpointprop+json",
+    "application/alto-endpointpropparams+json",
+    "application/alto-endpointcost+json",
+    "application/alto-endpointcostparams+json",
+    "application/alto-error+json",
+}
 
 
 def get_basic_info(response):
@@ -64,9 +65,13 @@ def check_ird_configuration_entry(response, ird_resource_id, context_id, resourc
             for tag in context_tags:
                 if "dependency" in tag:
                     for one_dependency in tag["dependency"]:
-                        _context_id = re.findall("\d{8}-\d{4}-\d{4}-\d{4}-\d{12}", one_dependency)[0]
+                        _context_id = re.findall(
+                            "\d{8}-\d{4}-\d{4}-\d{4}-\d{12}", one_dependency
+                        )[0]
                         if _context_id == context_id:
-                            long_resource_id = re.findall("resource-id='[a-zA-Z\-]*'", one_dependency)[0]
+                            long_resource_id = re.findall(
+                                "resource-id='[a-zA-Z\-]*'", one_dependency
+                            )[0]
                             short_resource_id = re.findall("'.*'", long_resource_id)[0]
                             _resource_id = short_resource_id.replace("'", "")
                             if _resource_id == resource_id:
@@ -107,7 +112,10 @@ def verify_ird(response):
     resources = resp["resources"]
     for resource in resources.keys():
         if set(resources[resource].keys()).issubset(resource_key_set):
-            if "uri" not in resources[resource] or "media-type" not in resources[resource]:
+            if (
+                "uri" not in resources[resource]
+                or "media-type" not in resources[resource]
+            ):
                 return False
             else:
                 _resource = resources[resource]
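
The verify_ird() hunk above shows how black lays out conditions that
exceed the line length (sketched here with hypothetical data): the
test is wrapped in parentheses and each boolean operator starts its
own line.

    resource = {"uri": "http://example.invalid", "accepts": []}
    if (
        "uri" not in resource
        or "media-type" not in resource
    ):
        print("incomplete IRD resource entry")
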
diff --git a/csit/libraries/Appenders/ElasticsearchAppender.py b/csit/libraries/Appenders/ElasticsearchAppender.py
index 52cbe746c0a73a4f313674c55998153fdb0bc79e..efb1a6b663f9f7ee33389130b756b379c6f7c219 100644 (file)
--- a/csit/libraries/Appenders/ElasticsearchAppender.py
+++ b/csit/libraries/Appenders/ElasticsearchAppender.py
@@ -35,7 +35,8 @@ from elasticsearch import Elasticsearch
 from elasticsearch_dsl import Search
 import re
 import matplotlib as mpl
-mpl.use('Agg')
+
+mpl.use("Agg")
 
 
 class MBeanNotFoundError(Exception):
@@ -44,9 +45,9 @@ class MBeanNotFoundError(Exception):
 
 
 class BaseAppender(object):
-    '''
+    """
         Base Appender from which all appenders should inherit
-    '''
+    """
 
     def _get_index(self, need_all):
         raise NotImplementedError
@@ -56,55 +57,73 @@ class BaseAppender(object):
 
 
 class ElasticsearchAppender(BaseAppender):
-    '''
+    """
         ElasticsearchAppender Class
         Metrics supported : Memory, ClassLoading, Threading, GarbageCollector
         Individual resource attributes as defined in attr dictionary object
-    '''
-
-    attr = {'Memory': ['HeapMemoryUsage', 'NonHeapMemoryUsage',
-                       '@timestamp'],
-            'ClassLoading': ['TotalLoadedClassCount', 'UnloadedClassCount',
-                             '@timestamp'],
-            'OperatingSystem': ['FreeSwapSpaceSize', 'TotalSwapSpaceSize',
-                                'FreePhysicalMemorySize',
-                                'TotalPhysicalMemorySize',
-                                'CommittedVirtualMemorySize', 'ProcessCpuLoad',
-                                'ProcessCpuTime', 'SystemCpuLoad',
-                                '@timestamp'],
-            'Threading': ['DaemonThreadCount', 'PeakThreadCount',
-                          'ThreadCount', 'TotalStartedThreadCount',
-                          '@timestamp'],
-            'GarbageCollector': ['LastGcInfo', 'CollectionCount',
-                                 '@timestamp', 'CollectionTime']}
-    label = {'Memory': 'Memory', 'ClassLoading': 'Class Loading',
-             'Threading': 'Threads', 'GarbageCollector': 'Garbage Collector'}
-
-    def get_connection(self, host='localhost', port=9200):
+    """
+
+    attr = {
+        "Memory": ["HeapMemoryUsage", "NonHeapMemoryUsage", "@timestamp"],
+        "ClassLoading": ["TotalLoadedClassCount", "UnloadedClassCount", "@timestamp"],
+        "OperatingSystem": [
+            "FreeSwapSpaceSize",
+            "TotalSwapSpaceSize",
+            "FreePhysicalMemorySize",
+            "TotalPhysicalMemorySize",
+            "CommittedVirtualMemorySize",
+            "ProcessCpuLoad",
+            "ProcessCpuTime",
+            "SystemCpuLoad",
+            "@timestamp",
+        ],
+        "Threading": [
+            "DaemonThreadCount",
+            "PeakThreadCount",
+            "ThreadCount",
+            "TotalStartedThreadCount",
+            "@timestamp",
+        ],
+        "GarbageCollector": [
+            "LastGcInfo",
+            "CollectionCount",
+            "@timestamp",
+            "CollectionTime",
+        ],
+    }
+    label = {
+        "Memory": "Memory",
+        "ClassLoading": "Class Loading",
+        "Threading": "Threads",
+        "GarbageCollector": "Garbage Collector",
+    }
+
+    def get_connection(self, host="localhost", port=9200):
         host = self.cleanse_string(host)
         port = self.cleanse_string(port)
         return self._get_connection(host, port)
 
     def get_jvm_memory(self, connection):
-        return self._get_mbean_attr(connection, 'Memory')
+        return self._get_mbean_attr(connection, "Memory")
 
     def get_jvm_classloading(self, connection):
-        return self._get_mbean_attr(connection, 'ClassLoading',)
+        return self._get_mbean_attr(connection, "ClassLoading")
 
     def get_jvm_threading(self, connection):
-        return self._get_mbean_attr(connection, 'Threading')
+        return self._get_mbean_attr(connection, "Threading")
 
     def get_jvm_garbagecollector(self, connection):
-        return self._get_mbean_attr(connection, 'GarbageCollector')
+        return self._get_mbean_attr(connection, "GarbageCollector")
 
     def get_jvm_operatingsystem(self, connection):
-        return self._get_mbean_attr(connection, 'OperatingSystem')
+        return self._get_mbean_attr(connection, "OperatingSystem")
 
     def cleanse_string(self, s):
         return str(s).replace("'", "")
 
-    def plot_points(self, connection, title, filename, metric, submetric,
-                    submetrickey=None):
+    def plot_points(
+        self, connection, title, filename, metric, submetric, submetrickey=None
+    ):
 
         from matplotlib import dates, pyplot as plt, ticker as tkr
 
@@ -113,25 +132,24 @@ class ElasticsearchAppender(BaseAppender):
         if submetrickey is not None:
             submetrickey = self.cleanse_string(submetrickey)
 
-        points = self._get_plot_points(connection, metric, submetric,
-                                       submetrickey)
+        points = self._get_plot_points(connection, metric, submetric, submetrickey)
         points[0] = [p.replace(microsecond=0) for p in points[0]]
-        myFmt = dates.DateFormatter('%m-%d %H:%M:%S')
+        myFmt = dates.DateFormatter("%m-%d %H:%M:%S")
         fig, ax = plt.subplots()
 
-        ax.plot_date(points[0], points[1], 'c-')
-        ax.grid(color='grey')
-        ax.patch.set_facecolor('black')
+        ax.plot_date(points[0], points[1], "c-")
+        ax.grid(color="grey")
+        ax.patch.set_facecolor("black")
         ax.xaxis.set_major_formatter(myFmt)
 
         axes = plt.gca()
         axes.get_yaxis().get_major_formatter().set_scientific(False)
         axes.get_yaxis().get_major_formatter().set_useOffset(False)
 
-        ax.set_xlabel('Time')
+        ax.set_xlabel("Time")
         xlabel = self._convert(submetric).title()
         if submetrickey is not None:
-            xlabel = xlabel + ' : ' + str(submetrickey).title()
+            xlabel = xlabel + " : " + str(submetrickey).title()
         ax.set_ylabel(xlabel)
 
         mx = max(points[1]) + max(points[1]) * 0.00001
@@ -140,17 +158,15 @@ class ElasticsearchAppender(BaseAppender):
 
         ax.set_title(title)
         if isinstance(points[1][0], int):
-            axes.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, _:
-                                                             int(x)))
+            axes.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, _: int(x)))
         else:
-            axes.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, _:
-                                                             float(x)))
+            axes.yaxis.set_major_formatter(tkr.FuncFormatter(lambda x, _: float(x)))
         plt.gcf().autofmt_xdate()
-        plt.savefig(filename, bbox_inches='tight')
+        plt.savefig(filename, bbox_inches="tight")
 
     def _convert(self, name):
-        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
-        return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1).lower()
+        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1 \2", name)
+        return re.sub("([a-z0-9])([A-Z])", r"\1 \2", s1).lower()
 
     def _get_y_val(self, response, metric, submetric=None):
         if isinstance(response[metric], dict):
@@ -158,51 +174,61 @@ class ElasticsearchAppender(BaseAppender):
         else:
             return response[metric]
 
-    def _get_plot_points(self, connection, metric, submetric,
-                         submetrickey=None):
+    def _get_plot_points(self, connection, metric, submetric, submetrickey=None):
         indices = self._get_index(connection, need_all=True)
         points = []
         for index in indices:
             responses = self._get_all_mbean_attr(connection, metric, index)
             for response in responses:
-                point = (self._get_datetime_object(response['@timestamp']),
-                         self._get_y_val(response, submetric, submetrickey))
+                point = (
+                    self._get_datetime_object(response["@timestamp"]),
+                    self._get_y_val(response, submetric, submetrickey),
+                )
                 points.append(point)
         points.sort(key=itemgetter(0))
         return zip(*points)
 
     def _get_index(self, connection, need_all=False):
-        indices = sorted([i for i in
-                          connection.indices.get_mapping().keys()
-                          if i.startswith('karaf')])
+        indices = sorted(
+            [
+                i
+                for i in connection.indices.get_mapping().keys()
+                if i.startswith("karaf")
+            ]
+        )
         if need_all:
             return indices
         else:
             return sorted(indices, reverse=True)[0]
 
     def _get_connection(self, host, port):
-        con_obj = {'host': host, 'port': port}
+        con_obj = {"host": host, "port": port}
         es = Elasticsearch([con_obj])
         return es
 
-    def _get_all_mbean_attr(self, connection, mbean, index, dsl_class='match'):
-        s = Search(using=connection, index=index).\
-            query(dsl_class, ObjectName=mbean).\
-            sort({"@timestamp": {"order": 'desc'}})
+    def _get_all_mbean_attr(self, connection, mbean, index, dsl_class="match"):
+        s = (
+            Search(using=connection, index=index)
+            .query(dsl_class, ObjectName=mbean)
+            .sort({"@timestamp": {"order": "desc"}})
+        )
         response = []
         for hit in s.scan():
             response.append(self._get_attr_obj([hit], mbean))
         return response
 
-    def _get_mbean_attr(self, connection, mbean, dsl_class='match'):
+    def _get_mbean_attr(self, connection, mbean, dsl_class="match"):
         index = self._get_index(connection)
 
         try:
-            s = Search(using=connection, index=index).\
-                query(dsl_class, ObjectName=mbean).\
-                sort({"@timestamp": {"order": 'desc'}})[0].execute()
+            s = (
+                Search(using=connection, index=index)
+                .query(dsl_class, ObjectName=mbean)
+                .sort({"@timestamp": {"order": "desc"}})[0]
+                .execute()
+            )
         except Exception:
-            raise MBeanNotFoundError('Could Not Fetch %s mbean' % mbean)
+            raise MBeanNotFoundError("Could Not Fetch %s mbean" % mbean)
 
         mem_attr = self._get_attr_obj(s, mbean)
         return mem_attr
@@ -219,4 +245,4 @@ class ElasticsearchAppender(BaseAppender):
         return mbean_attr
 
     def _get_datetime_object(self, timestamp):
-        return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S,%fZ')
+        return datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S,%fZ")
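
The Search(...) rewrites above follow black's treatment of chained
calls (sketched here with only the standard library): backslash
continuations are replaced by a parenthesized chain with one method
call per line once the chain no longer fits on a single line.

    text = "  Heap Memory Usage  "
    label = (
        text.strip()
        .lower()
        .replace(" ", "_")
    )
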
diff --git a/csit/libraries/AuthStandalone.py b/csit/libraries/AuthStandalone.py
index 1ba99c1f176b004aa2e47072881c547a2e8ab339..5cb840a4f9054b38e7fc23a12d9cddac7140fd63 100644 (file)
--- a/csit/libraries/AuthStandalone.py
+++ b/csit/libraries/AuthStandalone.py
@@ -92,7 +92,9 @@ class _BasicReusingSession(object):
         if username:
             self.session.auth = (username, password)  # May work with non-string values
         else:
-            self.session.auth = None  # Supports "no authentication mode" as in odl-restconf-noauth
+            self.session.auth = (
+                None  # Supports "no authentication mode" as in odl-restconf-noauth
+            )
 
     def robust_method(self, method, uri, **kwargs):
         """Try method once using session credentials. Return last response."""
@@ -109,7 +111,9 @@ class _BasicClosingSession(object):
         if username:
             self.auth = (username, password)  # May work with non-string values
         else:
-            self.auth = None  # Supports "no authentication mode" as in odl-restconf-noauth
+            self.auth = (
+                None  # Supports "no authentication mode" as in odl-restconf-noauth
+            )
         self.session = None
 
     def robust_method(self, method, uri, **kwargs):
@@ -144,7 +148,9 @@ class _TokenReusingSession(object):
         if self.session:
             self.session.close()
         self.session = requests.Session()
-        resp = self.session.post(self.auth_url, data=self.auth_data, headers=self.auth_header)
+        resp = self.session.post(
+            self.auth_url, data=self.auth_data, headers=self.auth_header
+        )
         resp_obj = json.loads(resp.text)
         try:
             token = resp_obj["access_token"]
@@ -191,7 +197,9 @@ class _TokenClosingSession(object):
         if self.session:
             self.session.close()
         self.session = requests.Session()
-        resp = self.session.post(self.auth_url, data=self.auth_data, headers=self.auth_header)
+        resp = self.session.post(
+            self.auth_url, data=self.auth_data, headers=self.auth_header
+        )
         resp_obj = json.loads(resp.text)
         try:
             token = resp_obj["access_token"]
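
The session.auth rewrites above show one more black quirk (sketched
here with a plain variable): when an assignment plus its trailing
comment exceeds the line length, black parenthesizes the right-hand
side rather than moving or rewrapping the comment.

    session_auth = (
        None  # Supports "no authentication mode" as in odl-restconf-noauth
    )
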
diff --git a/csit/libraries/BGPCEP/ipaddr.py b/csit/libraries/BGPCEP/ipaddr.py
index 59a5e89cb7002d176e8c104d7a218aa30620ad9d..eb90e2d32d22821daabcf8d217991696d980d4f6 100644 (file)
--- a/csit/libraries/BGPCEP/ipaddr.py
+++ b/csit/libraries/BGPCEP/ipaddr.py
@@ -25,7 +25,7 @@ and networks.
 import struct
 
 
-__version__ = '2.1.11'
+__version__ = "2.1.11"
 
 
 IPV4LENGTH = 32
@@ -76,8 +76,7 @@ def IPAddress(address, version=None):
     except (AddressValueError, NetmaskValueError):
         pass
 
-    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
-                     address)
+    raise ValueError("%r does not appear to be an IPv4 or IPv6 address" % address)
 
 
 def IPNetwork(address, version=None, strict=False):
@@ -117,8 +116,7 @@ def IPNetwork(address, version=None, strict=False):
     except (AddressValueError, NetmaskValueError):
         pass
 
-    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
-                     address)
+    raise ValueError("%r does not appear to be an IPv4 or IPv6 network" % address)
 
 
 def v4_int_to_packed(address):
@@ -135,8 +133,8 @@ def v4_int_to_packed(address):
           address.
     """
     if address > _BaseV4._ALL_ONES:
-        raise ValueError('Address too large for IPv4')
-    return Bytes(struct.pack('!I', address))
+        raise ValueError("Address too large for IPv4")
+    return Bytes(struct.pack("!I", address))
 
 
 def v6_int_to_packed(address):
@@ -148,7 +146,7 @@ def v6_int_to_packed(address):
     Returns:
         The binary representation of this address.
     """
-    return Bytes(struct.pack('!QQ', address >> 64, address & (2**64 - 1)))
+    return Bytes(struct.pack("!QQ", address >> 64, address & (2 ** 64 - 1)))
 
 
 def _find_address_range(addresses):
@@ -233,12 +231,13 @@ def summarize_address_range(first, last):
 
     """
     if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
-        raise TypeError('first and last must be IP addresses, not networks')
+        raise TypeError("first and last must be IP addresses, not networks")
     if first.version != last.version:
-        raise TypeError("%s and %s are not of the same version" % (
-                        str(first), str(last)))
+        raise TypeError(
+            "%s and %s are not of the same version" % (str(first), str(last))
+        )
     if first > last:
-        raise ValueError('last IP address must be greater than first')
+        raise ValueError("last IP address must be greater than first")
 
     networks = []
 
@@ -247,7 +246,7 @@ def summarize_address_range(first, last):
     elif first.version == 6:
         ip = IPv6Network
     else:
-        raise ValueError('unknown IP version')
+        raise ValueError("unknown IP version")
 
     ip_bits = first._max_prefixlen
     first_int = first._ip
@@ -256,13 +255,13 @@ def summarize_address_range(first, last):
         nbits = _count_righthand_zero_bits(first_int, ip_bits)
         current = None
         while nbits >= 0:
-            addend = 2**nbits - 1
+            addend = 2 ** nbits - 1
             current = first_int + addend
             nbits -= 1
             if current <= last_int:
                 break
         prefix = _get_prefix_length(first_int, current, ip_bits)
-        net = ip('%s/%d' % (str(first), prefix))
+        net = ip("%s/%d" % (str(first), prefix))
         networks.append(net)
         if current == ip._ALL_ONES:
             break
@@ -345,18 +344,21 @@ def collapse_address_list(addresses):
     for ip in addresses:
         if isinstance(ip, _BaseIP):
             if ips and ips[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                                str(ip), str(ips[-1])))
+                raise TypeError(
+                    "%s and %s are not of the same version" % (str(ip), str(ips[-1]))
+                )
             ips.append(ip)
         elif ip._prefixlen == ip._max_prefixlen:
             if ips and ips[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                                str(ip), str(ips[-1])))
+                raise TypeError(
+                    "%s and %s are not of the same version" % (str(ip), str(ips[-1]))
+                )
             ips.append(ip.ip)
         else:
             if nets and nets[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                                str(ip), str(nets[-1])))
+                raise TypeError(
+                    "%s and %s are not of the same version" % (str(ip), str(nets[-1]))
+                )
             nets.append(ip)
 
     # sort and dedup
@@ -368,8 +370,9 @@ def collapse_address_list(addresses):
         i = ips.index(last) + 1
         addrs.extend(summarize_address_range(first, last))
 
-    return _collapse_address_list_recursive(sorted(
-        addrs + nets, key=_BaseNet._get_networks_key))
+    return _collapse_address_list_recursive(
+        sorted(addrs + nets, key=_BaseNet._get_networks_key)
+    )
 
 
 # backwards compatibility
@@ -393,9 +396,10 @@ try:
         raise TypeError("bytes is not a distinct type")
     Bytes = bytes
 except (NameError, TypeError):
+
     class Bytes(str):
         def __repr__(self):
-            return 'Bytes(%s)' % str.__repr__(self)
+            return "Bytes(%s)" % str.__repr__(self)
 
 
 def get_mixed_type_key(obj):
@@ -458,8 +462,7 @@ class _BaseIP(_IPAddrBase):
 
     def __eq__(self, other):
         try:
-            return (self._ip == other._ip and
-                    self._version == other._version)
+            return self._ip == other._ip and self._version == other._version
         except AttributeError:
             return NotImplemented
 
@@ -483,22 +486,26 @@ class _BaseIP(_IPAddrBase):
 
     def __lt__(self, other):
         if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same version" % (str(self), str(other))
+            )
         if not isinstance(other, _BaseIP):
-            raise TypeError('%s and %s are not of the same type' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same type" % (str(self), str(other))
+            )
         if self._ip != other._ip:
             return self._ip < other._ip
         return False
 
     def __gt__(self, other):
         if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same version" % (str(self), str(other))
+            )
         if not isinstance(other, _BaseIP):
-            raise TypeError('%s and %s are not of the same type' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same type" % (str(self), str(other))
+            )
         if self._ip != other._ip:
             return self._ip > other._ip
         return False
@@ -516,10 +523,10 @@ class _BaseIP(_IPAddrBase):
         return IPAddress(int(self) - other, version=self._version)
 
     def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, str(self))
+        return "%s(%r)" % (self.__class__.__name__, str(self))
 
     def __str__(self):
-        return '%s' % self._string_from_ip_int(self._ip)
+        return "%s" % self._string_from_ip_int(self._ip)
 
     def __hash__(self):
         return hash(hex(long(self._ip)))
@@ -529,7 +536,7 @@ class _BaseIP(_IPAddrBase):
 
     @property
     def version(self):
-        raise NotImplementedError('BaseIP has no version')
+        raise NotImplementedError("BaseIP has no version")
 
 
 class _BaseNet(_IPAddrBase):
@@ -545,7 +552,7 @@ class _BaseNet(_IPAddrBase):
         self._cache = {}
 
     def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, str(self))
+        return "%s(%r)" % (self.__class__.__name__, str(self))
 
     def iterhosts(self):
         """Generate Iterator over usable hosts in a network.
@@ -582,11 +589,13 @@ class _BaseNet(_IPAddrBase):
 
     def __lt__(self, other):
         if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same version" % (str(self), str(other))
+            )
         if not isinstance(other, _BaseNet):
-            raise TypeError('%s and %s are not of the same type' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same type" % (str(self), str(other))
+            )
         if self.network != other.network:
             return self.network < other.network
         if self.netmask != other.netmask:
@@ -595,11 +604,13 @@ class _BaseNet(_IPAddrBase):
 
     def __gt__(self, other):
         if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same version" % (str(self), str(other))
+            )
         if not isinstance(other, _BaseNet):
-            raise TypeError('%s and %s are not of the same type' % (
-                            str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same type" % (str(self), str(other))
+            )
         if self.network != other.network:
             return self.network > other.network
         if self.netmask != other.netmask:
@@ -620,13 +631,14 @@ class _BaseNet(_IPAddrBase):
 
     def __eq__(self, other):
         try:
-            return (self._version == other._version and
-                    self.network == other.network and
-                    int(self.netmask) == int(other.netmask))
+            return (
+                self._version == other._version
+                and self.network == other.network
+                and int(self.netmask) == int(other.netmask)
+            )
         except AttributeError:
             if isinstance(other, _BaseIP):
-                return (self._version == other._version and
-                        self._ip == other._ip)
+                return self._version == other._version and self._ip == other._ip
 
     def __ne__(self, other):
         eq = self.__eq__(other)
@@ -635,8 +647,7 @@ class _BaseNet(_IPAddrBase):
         return not eq
 
     def __str__(self):
-        return '%s/%s' % (str(self.ip),
-                          str(self._prefixlen))
+        return "%s/%s" % (str(self.ip), str(self._prefixlen))
 
     def __hash__(self):
         return hash(int(self.network) ^ int(self.netmask))
@@ -647,54 +658,54 @@ class _BaseNet(_IPAddrBase):
             return False
         # dealing with another network.
         if isinstance(other, _BaseNet):
-            return (self.network <= other.network and
-                    self.broadcast >= other.broadcast)
+            return self.network <= other.network and self.broadcast >= other.broadcast
         # dealing with another address
         else:
-            return (int(self.network) <= int(other._ip) <=
-                    int(self.broadcast))
+            return int(self.network) <= int(other._ip) <= int(self.broadcast)
 
     def overlaps(self, other):
         """Tell if self is partly contained in other."""
-        return self.network in other or self.broadcast in other or (
-            other.network in self or other.broadcast in self)
+        return (
+            self.network in other
+            or self.broadcast in other
+            or (other.network in self or other.broadcast in self)
+        )
 
     @property
     def network(self):
-        x = self._cache.get('network')
+        x = self._cache.get("network")
         if x is None:
             x = IPAddress(self._ip & int(self.netmask), version=self._version)
-            self._cache['network'] = x
+            self._cache["network"] = x
         return x
 
     @property
     def broadcast(self):
-        x = self._cache.get('broadcast')
+        x = self._cache.get("broadcast")
         if x is None:
             x = IPAddress(self._ip | int(self.hostmask), version=self._version)
-            self._cache['broadcast'] = x
+            self._cache["broadcast"] = x
         return x
 
     @property
     def hostmask(self):
-        x = self._cache.get('hostmask')
+        x = self._cache.get("hostmask")
         if x is None:
-            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
-                          version=self._version)
-            self._cache['hostmask'] = x
+            x = IPAddress(int(self.netmask) ^ self._ALL_ONES, version=self._version)
+            self._cache["hostmask"] = x
         return x
 
     @property
     def with_prefixlen(self):
-        return '%s/%d' % (str(self.ip), self._prefixlen)
+        return "%s/%d" % (str(self.ip), self._prefixlen)
 
     @property
     def with_netmask(self):
-        return '%s/%s' % (str(self.ip), str(self.netmask))
+        return "%s/%s" % (str(self.ip), str(self.netmask))
 
     @property
     def with_hostmask(self):
-        return '%s/%s' % (str(self.ip), str(self.hostmask))
+        return "%s/%s" % (str(self.ip), str(self.hostmask))
 
     @property
     def numhosts(self):
@@ -703,7 +714,7 @@ class _BaseNet(_IPAddrBase):
 
     @property
     def version(self):
-        raise NotImplementedError('BaseNet has no version')
+        raise NotImplementedError("BaseNet has no version")
 
     @property
     def prefixlen(self):
@@ -744,23 +755,24 @@ class _BaseNet(_IPAddrBase):
 
         """
         if not self._version == other._version:
-            raise TypeError("%s and %s are not of the same version" % (
-                str(self), str(other)))
+            raise TypeError(
+                "%s and %s are not of the same version" % (str(self), str(other))
+            )
 
         if not isinstance(other, _BaseNet):
             raise TypeError("%s is not a network object" % str(other))
 
         if other not in self:
-            raise ValueError('%s not contained in %s' % (str(other),
-                                                         str(self)))
+            raise ValueError("%s not contained in %s" % (str(other), str(self)))
         if other == self:
             return []
 
         ret_addrs = []
 
         # Make sure we're comparing the network of other.
-        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
-                          version=other._version)
+        other = IPNetwork(
+            "%s/%s" % (str(other.network), str(other.prefixlen)), version=other._version
+        )
 
         s1, s2 = self.subnet()
         while s1 != other and s2 != other:
@@ -772,18 +784,21 @@ class _BaseNet(_IPAddrBase):
                 s1, s2 = s2.subnet()
             else:
                 # If we got here, there's a bug somewhere.
-                assert False, ('Error performing exclusion: '
-                               's1: %s s2: %s other: %s' %
-                               (str(s1), str(s2), str(other)))
+                assert False, (
+                    "Error performing exclusion: "
+                    "s1: %s s2: %s other: %s" % (str(s1), str(s2), str(other))
+                )
         if s1 == other:
             ret_addrs.append(s2)
         elif s2 == other:
             ret_addrs.append(s1)
         else:
             # If we got here, there's a bug somewhere.
-            assert False, ('Error performing exclusion: '
-                           's1: %s s2: %s other: %s' %
-                           (str(s1), str(s2), str(other)))
+            assert False, "Error performing exclusion: " "s1: %s s2: %s other: %s" % (
+                str(s1),
+                str(s2),
+                str(other),
+            )
 
         return sorted(ret_addrs, key=_BaseNet._get_networks_key)
 
@@ -883,7 +898,7 @@ class _BaseNet(_IPAddrBase):
         if ip_int == (1 << prefixlen) - 1:
             return prefixlen
         else:
-            raise NetmaskValueError('Bit pattern does not match /1*0*/')
+            raise NetmaskValueError("Bit pattern does not match /1*0*/")
 
     def _prefix_from_prefix_string(self, prefixlen_str):
         """Turn a prefix length string into an integer.
@@ -905,8 +920,7 @@ class _BaseNet(_IPAddrBase):
             if not (0 <= prefixlen <= self._max_prefixlen):
                 raise ValueError
         except ValueError:
-            raise NetmaskValueError('%s is not a valid prefix length' %
-                                    prefixlen_str)
+            raise NetmaskValueError("%s is not a valid prefix length" % prefixlen_str)
         return prefixlen
 
     def _prefix_from_ip_string(self, ip_str):
@@ -926,7 +940,7 @@ class _BaseNet(_IPAddrBase):
         try:
             ip_int = self._ip_int_from_string(ip_str)
         except AddressValueError:
-            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
+            raise NetmaskValueError("%s is not a valid netmask" % ip_str)
 
         # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
         # Note that the two ambiguous cases (all-ones and all-zeroes) are
@@ -941,7 +955,7 @@ class _BaseNet(_IPAddrBase):
         try:
             return self._prefix_from_ip_int(ip_int)
         except NetmaskValueError:
-            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
+            raise NetmaskValueError("%s is not a valid netmask" % ip_str)
 
     def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
         """The subnets which join to make the current subnet.
@@ -975,23 +989,25 @@ class _BaseNet(_IPAddrBase):
 
         if new_prefix is not None:
             if new_prefix < self._prefixlen:
-                raise ValueError('new prefix must be longer')
+                raise ValueError("new prefix must be longer")
             if prefixlen_diff != 1:
-                raise ValueError('cannot set prefixlen_diff and new_prefix')
+                raise ValueError("cannot set prefixlen_diff and new_prefix")
             prefixlen_diff = new_prefix - self._prefixlen
 
         if prefixlen_diff < 0:
-            raise ValueError('prefix length diff must be > 0')
+            raise ValueError("prefix length diff must be > 0")
         new_prefixlen = self._prefixlen + prefixlen_diff
 
         if new_prefixlen > self._max_prefixlen:
             raise ValueError(
-                'prefix length diff %d is invalid for netblock %s' % (
-                    new_prefixlen, str(self)))
+                "prefix length diff %d is invalid for netblock %s"
+                % (new_prefixlen, str(self))
+            )
 
-        first = IPNetwork('%s/%s' % (str(self.network),
-                                     str(self._prefixlen + prefixlen_diff)),
-                          version=self._version)
+        first = IPNetwork(
+            "%s/%s" % (str(self.network), str(self._prefixlen + prefixlen_diff)),
+            version=self._version,
+        )
 
         yield first
         current = first
@@ -1000,15 +1016,17 @@ class _BaseNet(_IPAddrBase):
             if broadcast == self.broadcast:
                 return
             new_addr = IPAddress(int(broadcast) + 1, version=self._version)
-            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
-                                version=self._version)
+            current = IPNetwork(
+                "%s/%s" % (str(new_addr), str(new_prefixlen)), version=self._version
+            )
 
             yield current
 
     def masked(self):
         """Return the network object with the host bits masked out."""
-        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
-                         version=self._version)
+        return IPNetwork(
+            "%s/%d" % (self.network, self._prefixlen), version=self._version
+        )
 
     def subnet(self, prefixlen_diff=1, new_prefix=None):
         """Return a list of subnets, rather than an iterator."""
@@ -1040,18 +1058,20 @@ class _BaseNet(_IPAddrBase):
 
         if new_prefix is not None:
             if new_prefix > self._prefixlen:
-                raise ValueError('new prefix must be shorter')
+                raise ValueError("new prefix must be shorter")
             if prefixlen_diff != 1:
-                raise ValueError('cannot set prefixlen_diff and new_prefix')
+                raise ValueError("cannot set prefixlen_diff and new_prefix")
             prefixlen_diff = self._prefixlen - new_prefix
 
         if self.prefixlen - prefixlen_diff < 0:
             raise ValueError(
-                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
-                (self.prefixlen, prefixlen_diff))
-        return IPNetwork('%s/%s' % (str(self.network),
-                                    str(self.prefixlen - prefixlen_diff)),
-                         version=self._version)
+                "current prefixlen is %d, cannot have a prefixlen_diff of %d"
+                % (self.prefixlen, prefixlen_diff)
+            )
+        return IPNetwork(
+            "%s/%s" % (str(self.network), str(self.prefixlen - prefixlen_diff)),
+            version=self._version,
+        )
 
     # backwards compatibility
     Subnet = subnet
@@ -1071,8 +1091,8 @@ class _BaseV4(object):
     """
 
     # Equivalent to 255.255.255.255 or 32 bits of 1's.
-    _ALL_ONES = (2**IPV4LENGTH) - 1
-    _DECIMAL_DIGITS = frozenset('0123456789')
+    _ALL_ONES = (2 ** IPV4LENGTH) - 1
+    _DECIMAL_DIGITS = frozenset("0123456789")
 
     def __init__(self, address):
         self._version = 4
@@ -1094,7 +1114,7 @@ class _BaseV4(object):
             AddressValueError: if ip_str isn't a valid IPv4 Address.
 
         """
-        octets = ip_str.split('.')
+        octets = ip_str.split(".")
         if len(octets) != 4:
             raise AddressValueError(ip_str)
 
@@ -1125,7 +1145,7 @@ class _BaseV4(object):
         octet_int = int(octet_str, 10)
         # Disallow leading zeroes, because no clear standard exists on
         # whether these should be interpreted as decimal or octal.
-        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
+        if octet_int > 255 or (octet_str[0] == "0" and len(octet_str) > 1):
             raise ValueError
         return octet_int
 
@@ -1143,7 +1163,7 @@ class _BaseV4(object):
         for _ in xrange(4):
             octets.insert(0, str(ip_int & 0xFF))
             ip_int >>= 8
-        return '.'.join(octets)
+        return ".".join(octets)
 
     @property
     def max_prefixlen(self):
@@ -1167,7 +1187,7 @@ class _BaseV4(object):
             reserved IPv4 Network range.
 
         """
-        return self in IPv4Network('240.0.0.0/4')
+        return self in IPv4Network("240.0.0.0/4")
 
     @property
     def is_private(self):
@@ -1177,9 +1197,11 @@ class _BaseV4(object):
             A boolean, True if the address is reserved per RFC 1918.
 
         """
-        return (self in IPv4Network('10.0.0.0/8') or
-                self in IPv4Network('172.16.0.0/12') or
-                self in IPv4Network('192.168.0.0/16'))
+        return (
+            self in IPv4Network("10.0.0.0/8")
+            or self in IPv4Network("172.16.0.0/12")
+            or self in IPv4Network("192.168.0.0/16")
+        )
 
     @property
     def is_multicast(self):
@@ -1190,7 +1212,7 @@ class _BaseV4(object):
             See RFC 3171 for details.
 
         """
-        return self in IPv4Network('224.0.0.0/4')
+        return self in IPv4Network("224.0.0.0/4")
 
     @property
     def is_unspecified(self):
@@ -1201,7 +1223,7 @@ class _BaseV4(object):
             RFC 5735 3.
 
         """
-        return self in IPv4Network('0.0.0.0')
+        return self in IPv4Network("0.0.0.0")
 
     @property
     def is_loopback(self):
@@ -1211,7 +1233,7 @@ class _BaseV4(object):
             A boolean, True if the address is a loopback per RFC 3330.
 
         """
-        return self in IPv4Network('127.0.0.0/8')
+        return self in IPv4Network("127.0.0.0/8")
 
     @property
     def is_link_local(self):
@@ -1221,7 +1243,7 @@ class _BaseV4(object):
             A boolean, True if the address is link-local per RFC 3927.
 
         """
-        return self in IPv4Network('169.254.0.0/16')
+        return self in IPv4Network("169.254.0.0/16")
 
 
 class IPv4Address(_BaseV4, _BaseIP):
@@ -1256,7 +1278,7 @@ class IPv4Address(_BaseV4, _BaseIP):
         # Constructing from a packed address
         if isinstance(address, Bytes):
             try:
-                self._ip, = struct.unpack('!I', address)
+                (self._ip,) = struct.unpack("!I", address)
             except struct.error:
                 raise AddressValueError(address)  # Wrong length.
             return
@@ -1335,7 +1357,7 @@ class IPv4Network(_BaseV4, _BaseNet):
 
         # Assume input argument to be string or any object representation
         # which converts into a formatted IP prefix string.
-        addr = str(address).split('/')
+        addr = str(address).split("/")
 
         if len(addr) > 2:
             raise AddressValueError(address)
@@ -1358,8 +1380,7 @@ class IPv4Network(_BaseV4, _BaseNet):
 
         if strict:
             if self.ip != self.network:
-                raise ValueError('%s has host bits set' %
-                                 self.ip)
+                raise ValueError("%s has host bits set" % self.ip)
         if self._prefixlen == (self._max_prefixlen - 1):
             self.iterhosts = self.__iter__
 
@@ -1386,9 +1407,9 @@ class _BaseV6(object):
 
     """
 
-    _ALL_ONES = (2**IPV6LENGTH) - 1
+    _ALL_ONES = (2 ** IPV6LENGTH) - 1
     _HEXTET_COUNT = 8
-    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+    _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
 
     def __init__(self, address):
         self._version = 6
@@ -1407,17 +1428,17 @@ class _BaseV6(object):
             AddressValueError: if ip_str isn't a valid IPv6 Address.
 
         """
-        parts = ip_str.split(':')
+        parts = ip_str.split(":")
 
         # An IPv6 address needs at least 2 colons (3 parts).
         if len(parts) < 3:
             raise AddressValueError(ip_str)
 
         # If the address has an IPv4-style suffix, convert it to hexadecimal.
-        if '.' in parts[-1]:
+        if "." in parts[-1]:
             ipv4_int = IPv4Address(parts.pop())._ip
-            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
-            parts.append('%x' % (ipv4_int & 0xFFFF))
+            parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
+            parts.append("%x" % (ipv4_int & 0xFFFF))
 
         # An IPv6 address can't have more than 8 colons (9 parts).
         if len(parts) > self._HEXTET_COUNT + 1:
@@ -1426,9 +1447,9 @@ class _BaseV6(object):
         # Disregarding the endpoints, find '::' with nothing in between.
         # This indicates that a run of zeroes has been skipped.
         try:
-            skip_index, = (
-                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
-                [None])
+            (skip_index,) = [i for i in xrange(1, len(parts) - 1) if not parts[i]] or [
+                None
+            ]
         except ValueError:
             # Can't have more than one '::'
             raise AddressValueError(ip_str)
@@ -1517,7 +1538,7 @@ class _BaseV6(object):
         doublecolon_start = -1
         doublecolon_len = 0
         for index in range(len(hextets)):
-            if hextets[index] == '0':
+            if hextets[index] == "0":
                 doublecolon_len += 1
                 if doublecolon_start == -1:
                     # Start of a sequence of zeros.
@@ -1531,15 +1552,14 @@ class _BaseV6(object):
                 doublecolon_start = -1
 
         if best_doublecolon_len > 1:
-            best_doublecolon_end = (best_doublecolon_start +
-                                    best_doublecolon_len)
+            best_doublecolon_end = best_doublecolon_start + best_doublecolon_len
             # For zeros at the end of the address.
             if best_doublecolon_end == len(hextets):
-                hextets += ['']
-            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+                hextets += [""]
+            hextets[best_doublecolon_start:best_doublecolon_end] = [""]
             # For zeros at the beginning of the address.
             if best_doublecolon_start == 0:
-                hextets = [''] + hextets
+                hextets = [""] + hextets
 
         return hextets
 
@@ -1560,15 +1580,15 @@ class _BaseV6(object):
             ip_int = int(self._ip)
 
         if ip_int > self._ALL_ONES:
-            raise ValueError('IPv6 address is too large')
+            raise ValueError("IPv6 address is too large")
 
-        hex_str = '%032x' % ip_int
+        hex_str = "%032x" % ip_int
         hextets = []
         for x in range(0, 32, 4):
-            hextets.append('%x' % int(hex_str[x:x + 4], 16))
+            hextets.append("%x" % int(hex_str[x : x + 4], 16))
 
         hextets = self._compress_hextets(hextets)
-        return ':'.join(hextets)
+        return ":".join(hextets)
 
     def _explode_shorthand_ip_string(self):
         """Expand a shortened IPv6 address.
@@ -1588,12 +1608,12 @@ class _BaseV6(object):
         ip_int = self._ip_int_from_string(ip_str)
         parts = []
         for i in xrange(self._HEXTET_COUNT):
-            parts.append('%04x' % (ip_int & 0xFFFF))
+            parts.append("%04x" % (ip_int & 0xFFFF))
             ip_int >>= 16
         parts.reverse()
         if isinstance(self, _BaseNet):
-            return '%s/%d' % (':'.join(parts), self.prefixlen)
-        return ':'.join(parts)
+            return "%s/%d" % (":".join(parts), self.prefixlen)
+        return ":".join(parts)
 
     @property
     def max_prefixlen(self):
@@ -1617,7 +1637,7 @@ class _BaseV6(object):
             See RFC 2373 2.7 for details.
 
         """
-        return self in IPv6Network('ff00::/8')
+        return self in IPv6Network("ff00::/8")
 
     @property
     def is_reserved(self):
@@ -1628,21 +1648,23 @@ class _BaseV6(object):
             reserved IPv6 Network ranges.
 
         """
-        return (self in IPv6Network('::/8') or
-                self in IPv6Network('100::/8') or
-                self in IPv6Network('200::/7') or
-                self in IPv6Network('400::/6') or
-                self in IPv6Network('800::/5') or
-                self in IPv6Network('1000::/4') or
-                self in IPv6Network('4000::/3') or
-                self in IPv6Network('6000::/3') or
-                self in IPv6Network('8000::/3') or
-                self in IPv6Network('A000::/3') or
-                self in IPv6Network('C000::/3') or
-                self in IPv6Network('E000::/4') or
-                self in IPv6Network('F000::/5') or
-                self in IPv6Network('F800::/6') or
-                self in IPv6Network('FE00::/9'))
+        return (
+            self in IPv6Network("::/8")
+            or self in IPv6Network("100::/8")
+            or self in IPv6Network("200::/7")
+            or self in IPv6Network("400::/6")
+            or self in IPv6Network("800::/5")
+            or self in IPv6Network("1000::/4")
+            or self in IPv6Network("4000::/3")
+            or self in IPv6Network("6000::/3")
+            or self in IPv6Network("8000::/3")
+            or self in IPv6Network("A000::/3")
+            or self in IPv6Network("C000::/3")
+            or self in IPv6Network("E000::/4")
+            or self in IPv6Network("F000::/5")
+            or self in IPv6Network("F800::/6")
+            or self in IPv6Network("FE00::/9")
+        )
 
     @property
     def is_unspecified(self):
@@ -1653,7 +1675,7 @@ class _BaseV6(object):
             RFC 2373 2.5.2.
 
         """
-        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
+        return self._ip == 0 and getattr(self, "_prefixlen", 128) == 128
 
     @property
     def is_loopback(self):
@@ -1664,7 +1686,7 @@ class _BaseV6(object):
             RFC 2373 2.5.3.
 
         """
-        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
+        return self._ip == 1 and getattr(self, "_prefixlen", 128) == 128
 
     @property
     def is_link_local(self):
@@ -1674,7 +1696,7 @@ class _BaseV6(object):
             A boolean, True if the address is reserved per RFC 4291.
 
         """
-        return self in IPv6Network('fe80::/10')
+        return self in IPv6Network("fe80::/10")
 
     @property
     def is_site_local(self):
@@ -1688,7 +1710,7 @@ class _BaseV6(object):
             A boolean, True if the address is reserved per RFC 3513 2.5.6.
 
         """
-        return self in IPv6Network('fec0::/10')
+        return self in IPv6Network("fec0::/10")
 
     @property
     def is_private(self):
@@ -1698,7 +1720,7 @@ class _BaseV6(object):
             A boolean, True if the address is reserved per RFC 4193.
 
         """
-        return self in IPv6Network('fc00::/7')
+        return self in IPv6Network("fc00::/7")
 
     @property
     def ipv4_mapped(self):
@@ -1725,8 +1747,10 @@ class _BaseV6(object):
         """
         if (self._ip >> 96) != 0x20010000:
             return None
-        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
-                IPv4Address(~self._ip & 0xFFFFFFFF))
+        return (
+            IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+            IPv4Address(~self._ip & 0xFFFFFFFF),
+        )
 
     @property
     def sixtofour(self):
@@ -1776,7 +1800,7 @@ class IPv6Address(_BaseV6, _BaseIP):
         # Constructing from a packed address
         if isinstance(address, Bytes):
             try:
-                hi, lo = struct.unpack('!QQ', address)
+                hi, lo = struct.unpack("!QQ", address)
             except struct.error:
                 raise AddressValueError(address)  # Wrong length.
             self._ip = (hi << 64) | lo
@@ -1786,7 +1810,7 @@ class IPv6Address(_BaseV6, _BaseIP):
         # which converts into a formatted IP string.
         addr_str = str(address)
         if not addr_str:
-            raise AddressValueError('')
+            raise AddressValueError("")
 
         self._ip = self._ip_int_from_string(addr_str)
 
@@ -1850,7 +1874,7 @@ class IPv6Network(_BaseV6, _BaseNet):
 
         # Assume input argument to be string or any object representation
         # which converts into a formatted IP prefix string.
-        addr = str(address).split('/')
+        addr = str(address).split("/")
 
         if len(addr) > 2:
             raise AddressValueError(address)
@@ -1868,8 +1892,7 @@ class IPv6Network(_BaseV6, _BaseNet):
 
         if strict:
             if self.ip != self.network:
-                raise ValueError('%s has host bits set' %
-                                 self.ip)
+                raise ValueError("%s has host bits set" % self.ip)
         if self._prefixlen == (self._max_prefixlen - 1):
             self.iterhosts = self.__iter__
 
index 817d8f7f1f9dede661989f6ff5507080bf71adf9..fb2b6fe75f7610774a72e0d0c041fb59313cbd73 100644
@@ -27,19 +27,19 @@ class BgpRpcClient(object):
 
     def exa_get_received_open_count(self):
         """Gets open messages counter."""
-        return self._exa_get_counter('open')
+        return self._exa_get_counter("open")
 
     def exa_get_received_keepalive_count(self):
         """Gets keepalive messages counter."""
-        return self._exa_get_counter('keepalive')
+        return self._exa_get_counter("keepalive")
 
     def exa_get_received_update_count(self):
         """Gets update messges counter."""
-        return self._exa_get_counter('update')
+        return self._exa_get_counter("update")
 
     def exa_get_received_route_refresh_count(self):
         """Gets route refresh message counter."""
-        return self._exa_get_counter('route_refresh')
+        return self._exa_get_counter("route_refresh")
 
     def _exa_clean_counter(self, msg_type):
         """Cleans counter on the server of given message type."""
@@ -47,19 +47,19 @@ class BgpRpcClient(object):
 
     def exa_clean_received_open_count(self):
         """Cleans open message counter."""
-        return self._exa_clean_counter('open')
+        return self._exa_clean_counter("open")
 
     def exa_clean_received_keepalive_count(self):
         """Cleans keepalive message counter."""
-        return self._exa_clean_counter('keepalive')
+        return self._exa_clean_counter("keepalive")
 
     def exa_clean_received_update_count(self):
         """Cleans update message counter."""
-        return self._exa_clean_counter('update')
+        return self._exa_clean_counter("update")
 
     def exa_clean_received_route_refresh_count(self):
         """Cleans route refresh message counter."""
-        return self._exa_clean_counter('route_refresh')
+        return self._exa_clean_counter("route_refresh")
 
     def _exa_clean_message(self, msg_type):
         """Cleans stored message on the server of given message type."""
@@ -67,7 +67,7 @@ class BgpRpcClient(object):
 
     def exa_clean_update_message(self):
         """Cleans update message."""
-        return self._exa_clean_message('update')
+        return self._exa_clean_message("update")
 
     def _exa_get_message(self, msg_type):
         """Gets stored message on the server of given message type."""
@@ -80,24 +80,26 @@ class BgpRpcClient(object):
         timestamp, ...). msg_only is a flag indicating that only the message
         content is wanted, with no details.
         """
-        msg = self._exa_get_message('update')
+        msg = self._exa_get_message("update")
         if not msg_only:
             return msg
-        return msg if 'neighbor' not in msg else msg['neighbor']['message']
+        return msg if "neighbor" not in msg else msg["neighbor"]["message"]
 
     def play_send(self, hexstring):
         """Sends given hex data, already encoded bgp update message is expected."""
         return self.proxy.send(hexstring.rstrip())
 
-    def play_get(self, what='update'):
+    def play_get(self, what="update"):
         """Gets the last received (update) mesage as hex string."""
         return self.proxy.get(what)
 
-    def play_clean(self, what='update'):
+    def play_clean(self, what="update"):
         """Cleans the message (update) on the server."""
         return self.proxy.clean(what)
 
     def sum_hex_message(self, hex_string):
         """Verifies two hex messages are equal even in case, their arguments are misplaced.
         Converts hex message arguments to integers and sums them up and returns the sum."""
-        return sum([int(x, 16) for x in re.compile('[a-f\d]{2}').findall(hex_string[32:])])
+        return sum(
+            [int(x, 16) for x in re.compile(r"[a-f\d]{2}").findall(hex_string[32:])]
+        )
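
sum_hex_message() makes comparing BGP update messages order-insensitive: it skips the first 32 hex characters (presumably the 16-byte BGP marker) and sums the remaining byte values, so two updates whose attributes were serialized in a different order still compare equal. A standalone sketch of the same idea, using a raw-string pattern:

    import re

    def sum_hex_message(hex_string):
        # Skip the 32 leading hex chars, then sum every remaining byte value.
        return sum(int(x, 16) for x in re.findall(r"[a-f\d]{2}", hex_string[32:]))

    marker = "ff" * 16
    # Same bytes in a different order produce the same sum.
    assert sum_hex_message(marker + "0a0b0c") == sum_hex_message(marker + "0c0b0a")
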
index 89a123a9e72c505ace4f4a4b0b73804f58c9d05b..0a1e63ee7dacaeac27046f3626427700860469bc 100644
@@ -14,13 +14,35 @@ class CapwapLibrary(object):
     def __init__(self):
         self.builtin = BuiltIn()
 
-    def send_discover(self, ac_ip, wtp_ip='', ip='ip', port=5246):
+    def send_discover(self, ac_ip, wtp_ip="", ip="ip", port=5246):
         """Send Discover CAPWAP Packet from a WTP."""
-        data = ''.join(chr(x) for x in [0x00, 0x20, 0x01, 0x02, 0x03, 0x04, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
-        self.builtin.log('Sending Discover Packet to: %s' % ac_ip, 'DEBUG')
+        data = "".join(
+            chr(x)
+            for x in [
+                0x00,
+                0x20,
+                0x01,
+                0x02,
+                0x03,
+                0x04,
+                5,
+                6,
+                7,
+                8,
+                9,
+                10,
+                11,
+                12,
+                13,
+                14,
+                15,
+                16,
+            ]
+        )
+        self.builtin.log("Sending Discover Packet to: %s" % ac_ip, "DEBUG")
         session = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
         session.sendto(data, (ac_ip, port))
-        self.builtin.log('Packet Sent', 'DEBUG')
+        self.builtin.log("Packet Sent", "DEBUG")
 
     def get_hostip(self):
         """Get Host IP Address."""
@@ -29,7 +51,7 @@ class CapwapLibrary(object):
 
     def get_simulated_wtpip(self, controller):
         """Get the Simulated WTP ip based on the controller."""
-        if controller == '127.0.0.1':
+        if controller == "127.0.0.1":
             exp_ip = controller
         else:
             exp_ip = self.get_hostip()
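
A note on send_discover() above: joining chr() values yields str on Python 3, where socket.sendto() expects bytes. A minimal sketch of a bytes-based equivalent, assuming the same 18-byte payload:

    import socket

    # The same 18 bytes as the chr() join above, built as a bytes object.
    DISCOVER = bytes([0x00, 0x20, 0x01, 0x02, 0x03, 0x04] + list(range(5, 17)))

    def send_discover(ac_ip, port=5246):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.sendto(DISCOVER, (ac_ip, port))
        sock.close()
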
index 91d82e1dfde7bf06d09b12b8384fa85d9488ade9..54fc0839925358916df265bbdfd4f418b117ae99 100644
@@ -11,7 +11,9 @@ __license__ = "New-style BSD"
 __email__ = "syedbahm@cisco.com"
 
 
-def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=3, port=8181, *ips):
+def getClusterRoles(
+    shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=3, port=8181, *ips
+):
     """Given a shardname (e.g. shard-inventory-config), number of shards and bunch of ips
 
     determines what role each ip has in an Akka (Raft based) cluster
@@ -26,13 +28,17 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
         while i <= numOfShards:
             shardMemberName = "member-" + str(i) + "-" + shardName
             j = 1
-            print('j => ', str(j))
-            print('numOfTries => ', str(numOfTries))
+            print("j => ", str(j))
+            print("numOfTries => ", str(numOfTries))
             while int(j) <= int(numOfTries):
                 print("Try number " + str(j))
                 try:
-                    print("getting role of " + ip + "  for shardName = " + shardMemberName)
-                    url = SettingsLibrary.getJolokiaURL(ip, str(port), str(i), shardName)
+                    print(
+                        "getting role of " + ip + "  for shardName = " + shardMemberName
+                    )
+                    url = SettingsLibrary.getJolokiaURL(
+                        ip, str(port), str(i), shardName
+                    )
                     print(url)
                     resp = UtilLibrary.get(url)
                     print(resp)
@@ -41,14 +47,20 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
                         continue
                     print(resp.text)
                     data = json.loads(resp.text)
-                    if 'value' in data:
-                        dataValue = data['value']
-                        print("datavalue RaftState is", dataValue['RaftState'])
-                        dict[ip] = dataValue['RaftState']
+                    if "value" in data:
+                        dataValue = data["value"]
+                        print("datavalue RaftState is", dataValue["RaftState"])
+                        dict[ip] = dataValue["RaftState"]
                 except Exception:
                     e = sys.exc_info()[0]
-                    print("Try" + str(j) + ":An error occurred when finding leader on" + ip +
-                          " for shardName:" + shardMemberName)
+                    print(
+                        "Try "
+                        + str(j)
+                        + ": An error occurred when finding leader on "
+                        + ip
+                        + " for shardName: "
+                        + shardMemberName
+                    )
                     print(e)
                     sleep(sleepBetweenRetriesInSecs)
                     continue
@@ -60,48 +72,68 @@ def getClusterRoles(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesI
     return dict
 
 
-def isRole(role, shardName, ipAddress, numOfShards=3, numOfRetries=1, sleepFor=3, port=8181):
+def isRole(
+    role, shardName, ipAddress, numOfShards=3, numOfRetries=1, sleepFor=3, port=8181
+):
     """Given a role (Leader, Follower, Candidate, or IsolatedLeader),
     shard name (e.g. shard-inventory-config), controller IP address,
     and number of shards on the controller, this function determines if the controller
     has that role for the specified shard.
     """
-    ip = getClusterRoles(shardName, numOfShards, numOfRetries, sleepFor, port, ipAddress)
+    ip = getClusterRoles(
+        shardName, numOfShards, numOfRetries, sleepFor, port, ipAddress
+    )
     print(ip)
     if ip[ipAddress] == role:
         return True
     return False
 
 
-def getLeader(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=1, port=8181, *ips):
+def getLeader(
+    shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=1, port=8181, *ips
+):
     """Returns the leader of the shard given a set of IPs Or None"""
     for i in range(3):  # Try 3 times to find a leader
-        dict = getClusterRoles(shardName, numOfShards, numOfTries, sleepBetweenRetriesInSecs, port, *ips)
+        dict = getClusterRoles(
+            shardName, numOfShards, numOfTries, sleepBetweenRetriesInSecs, port, *ips
+        )
         for ip in dict.keys():
-            if dict[ip] == 'Leader':
+            if dict[ip] == "Leader":
                 return ip
     return None
 
 
-def getFollowers(shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=1, port=8181, *ips):
+def getFollowers(
+    shardName, numOfShards=3, numOfTries=3, sleepBetweenRetriesInSecs=1, port=8181, *ips
+):
     """Returns the follower list of a shard given a set of IPs Or []"""
     for i in range(6):  # Try 6 times to find all followers
-        dict = getClusterRoles(shardName, numOfShards, numOfTries, sleepBetweenRetriesInSecs, port, *ips)
+        dict = getClusterRoles(
+            shardName, numOfShards, numOfTries, sleepBetweenRetriesInSecs, port, *ips
+        )
         result = []
 
         for ip in dict.keys():
-            if dict[ip] == 'Follower':
+            if dict[ip] == "Follower":
                 result.append(ip)
         print("i=%s result=%s" % (i, result))
-        if (len(result) == (len(ips) - 1)):
+        if len(result) == (len(ips) - 1):
             break
         sleep(1)
     return result
 
 
 def testGetClusterRoles():
-    dict = getClusterRoles("shard-inventory-config", 3, 1, 1, 8181,
-                           "10.194.126.116", "10.194.126.117", "10.194.126.118")
+    dict = getClusterRoles(
+        "shard-inventory-config",
+        3,
+        1,
+        1,
+        8181,
+        "10.194.126.116",
+        "10.194.126.117",
+        "10.194.126.118",
+    )
     print(dict)
 
     for ip in dict.keys():
@@ -114,18 +146,35 @@ def testGetClusterRoles():
 
 
 def testGetLeader():
-    leader = getLeader("shard-inventory-config", 3, 1, 1, 8181,
-                       "10.194.126.116", "10.194.126.117", "10.194.126.118")
+    leader = getLeader(
+        "shard-inventory-config",
+        3,
+        1,
+        1,
+        8181,
+        "10.194.126.116",
+        "10.194.126.117",
+        "10.194.126.118",
+    )
     print(leader)
     return leader
 
 
 def testGetFollowers():
-    followers = getFollowers("shard-inventory-config", 3, 1, 1, 8181,
-                             "10.194.126.116", "10.194.126.117", "10.194.126.118")
+    followers = getFollowers(
+        "shard-inventory-config",
+        3,
+        1,
+        1,
+        8181,
+        "10.194.126.116",
+        "10.194.126.117",
+        "10.194.126.118",
+    )
     print(followers)
     return followers
 
+
 # testGetClusterRoles()
 # testGetLeader()
 # testGetFollowers()
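
getLeader() and getFollowers() above are retry loops around getClusterRoles(), which polls each member's Jolokia shard MBean for its RaftState. A condensed, hypothetical sketch of that polling pattern; the real suite builds the URL via SettingsLibrary.getJolokiaURL and fetches via UtilLibrary.get, so the requests call and the url_template parameter here are illustrative assumptions:

    import time

    import requests  # assumed available in the test environment

    def find_leader(ips, url_template, tries=3, delay=1):
        """Polls each member until one reports RaftState == Leader."""
        for _ in range(tries):
            for ip in ips:
                try:
                    data = requests.get(url_template.format(ip=ip)).json()
                except (requests.RequestException, ValueError):
                    continue  # unreachable member or bad JSON; keep polling
                if data.get("value", {}).get("RaftState") == "Leader":
                    return ip
            time.sleep(delay)
        return None
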
index eb27b9d358641aa6303531af798ecff251a593c0..448815406e7188653d9a90799a9fb6896e5b8d74 100644
@@ -5,9 +5,9 @@ Updated: 2013-11-14
 """
 import collections
 
-'''
+"""
 Common constants and functions for the Robot Framework.
-'''
+"""
 
 
 def collection_should_contain(collection, *members):
@@ -28,11 +28,11 @@ def combine_strings(*strings):
     Combines the given `strings` together and returns the result.
     The given strings are not altered by this keyword.
     """
-    result = ''
+    result = ""
     for s in strings:
         if isinstance(s, str):
             result += s
-    if result == '':
+    if result == "":
         return None
     else:
         return result
@@ -45,40 +45,40 @@ def compare_xml(xml1, xml2):
     It just splits the XML into lines and checks that each line is in
     the other string
     """
-    for line in xml1.rstrip().split('\n'):
-        if line not in xml2.rstrip().split('\n'):
+    for line in xml1.rstrip().split("\n"):
+        if line not in xml2.rstrip().split("\n"):
             return False
 
-    for line in xml2.rstrip().split('\n'):
-        if line not in xml1.rstrip().split('\n'):
+    for line in xml2.rstrip().split("\n"):
+        if line not in xml1.rstrip().split("\n"):
             return False
 
     return True
 
 
 def num_of_nodes(depth, fanout):
-    '''returns num of switches of a mininet with tree topology
+    """returns num of switches of a mininet with tree topology
     with particular depth and fanout parameters
-    '''
+    """
     result = 0
     for i in range(depth):
-        result += fanout**i
+        result += fanout ** i
     return result
 
 
 def num_of_links_for_node(nodeid, leaflist, fanout):
-    '''
+    """
     If the given node is a leaf node, there will be only one link for it
     and nodeid will be represented 2 times in topology
     If the given node is not a leaf node, then there will be fanout+1 links
     for it and nodeid will be represented (fanout+1)*2 times in topology
 
     p.s. root node is excluded.
-    '''
+    """
     if nodeid in leaflist:
         return 1
-    return (fanout + 1)
+    return fanout + 1
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     print(num_of_nodes(3, 4))
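
num_of_nodes() sums the geometric series 1 + fanout + ... + fanout**(depth - 1); for depth=3 and fanout=4 that is 1 + 4 + 16 = 21, which is what the print() above shows. The closed form gives the same count without the loop (a small illustrative sketch):

    def num_of_nodes_closed_form(depth, fanout):
        # Geometric series sum: (fanout**depth - 1) / (fanout - 1).
        if fanout == 1:
            return depth
        return (fanout ** depth - 1) // (fanout - 1)

    assert num_of_nodes_closed_form(3, 4) == 21
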
index d97bbdb77178a47f2e74370876c254d2d0bac2f8..a181314f02acf2c7d40b25d12432ab398801065d 100644
@@ -21,7 +21,7 @@ def _parse_input(file_name):
     return pyhocon.ConfigFactory.parse_file(file_name)
 
 
-def generate_akka(original_file, node_idx=1, nodes_ip_list=['127.0.0.1']):
+def generate_akka(original_file, node_idx=1, nodes_ip_list=["127.0.0.1"]):
     """Generates akka.conf content.
 
     Args:
@@ -35,14 +35,21 @@ def generate_akka(original_file, node_idx=1, nodes_ip_list=['127.0.0.1']):
     """
 
     conf = _parse_input(original_file)
-    conf['odl-cluster-data']['akka']['remote']['netty']['tcp']['hostname'] = nodes_ip_list[node_idx - 1]
-    seed_nodes = [u'akka.tcp://opendaylight-cluster-data@{}:2550'.format(ip) for ip in nodes_ip_list]
-    conf['odl-cluster-data']['akka']['cluster']['seed-nodes'] = seed_nodes
-    conf['odl-cluster-data']['akka']['cluster']['roles'] = ["member-{}".format(node_idx)]
+    conf["odl-cluster-data"]["akka"]["remote"]["netty"]["tcp"][
+        "hostname"
+    ] = nodes_ip_list[node_idx - 1]
+    seed_nodes = [
+        u"akka.tcp://opendaylight-cluster-data@{}:2550".format(ip)
+        for ip in nodes_ip_list
+    ]
+    conf["odl-cluster-data"]["akka"]["cluster"]["seed-nodes"] = seed_nodes
+    conf["odl-cluster-data"]["akka"]["cluster"]["roles"] = [
+        "member-{}".format(node_idx)
+    ]
     return pyhocon.tool.HOCONConverter.to_hocon(conf)
 
 
-def generate_modules(original_file, name='', namespace=''):
+def generate_modules(original_file, name="", namespace=""):
     """Generates modules.conf content.
 
     If name and namespace parameters are filled, exactly one module item is added to the content of the original file.
@@ -59,13 +66,16 @@ def generate_modules(original_file, name='', namespace=''):
         :returns str: modules.conf content
     """
     conf = _parse_input(original_file)
-    if name != '' and namespace != '':
-        conf['modules'].append(
-            pyhocon.ConfigTree([("name", name), ("namespace", namespace), ("shard-strategy", "module")]))
+    if name != "" and namespace != "":
+        conf["modules"].append(
+            pyhocon.ConfigTree(
+                [("name", name), ("namespace", namespace), ("shard-strategy", "module")]
+            )
+        )
     return pyhocon.tool.HOCONConverter.to_hocon(conf)
 
 
-def generate_module_shards(original_file, nodes=1, shard_name='', replicas=[]):
+def generate_module_shards(original_file, nodes=1, shard_name="", replicas=[]):
     """Generates module-shards.conf content.
 
     If shard_name and replicas parameters are filled, exactly one shard item is added to the content of the original file.
@@ -83,12 +93,30 @@ def generate_module_shards(original_file, nodes=1, shard_name='', replicas=[]):
         :returns str: module-shards.conf content
     """
     conf = _parse_input(original_file)
-    for module_shard in conf['module-shards']:
-        module_shard["shards"][0]["replicas"] = ["member-{}".format(i + 1) for i in range(int(nodes))]
-    if shard_name != '' and replicas != []:
-        conf['module-shards'].append(
-            pyhocon.ConfigTree([("name", shard_name),
-                                ("shards", [pyhocon.ConfigTree(
-                                    [("name", shard_name),
-                                     ("replicas", ["member-{}".format(i) for i in replicas])])])]))
+    for module_shard in conf["module-shards"]:
+        module_shard["shards"][0]["replicas"] = [
+            "member-{}".format(i + 1) for i in range(int(nodes))
+        ]
+    if shard_name != "" and replicas != []:
+        conf["module-shards"].append(
+            pyhocon.ConfigTree(
+                [
+                    ("name", shard_name),
+                    (
+                        "shards",
+                        [
+                            pyhocon.ConfigTree(
+                                [
+                                    ("name", shard_name),
+                                    (
+                                        "replicas",
+                                        ["member-{}".format(i) for i in replicas],
+                                    ),
+                                ]
+                            )
+                        ],
+                    ),
+                ]
+            )
+        )
     return pyhocon.tool.HOCONConverter.to_hocon(conf)
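
The ConfGen hunks lean on pyhocon: parse a HOCON file, mutate the tree, and render it back with the same tool.HOCONConverter call used above. A minimal round-trip sketch; the input string and the urn:example:car namespace are illustrative stand-ins for the real modules.conf content:

    from pyhocon import ConfigFactory, ConfigTree
    from pyhocon.tool import HOCONConverter

    conf = ConfigFactory.parse_string('modules = [{name = "inventory"}]')
    conf["modules"].append(
        ConfigTree(
            [
                ("name", "car"),
                ("namespace", "urn:example:car"),  # placeholder namespace
                ("shard-strategy", "module"),
            ]
        )
    )
    print(HOCONConverter.to_hocon(conf))
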
index 4f327ee3fd1ce7d053ff5d3bbf40498c6ace52bc..b43382cf16cc339f8b809c5f1e5b13538898e6fa 100644
@@ -4,17 +4,17 @@ from itertools import repeat, ifilter
 
 
 class Counter(dict):
-    '''Dict subclass for counting hashable objects.  Sometimes called a bag
+    """Dict subclass for counting hashable objects.  Sometimes called a bag
     or multiset.  Elements are stored as dictionary keys and their counts
     are stored as dictionary values.
 
     >>> Counter('zyzygy')
     Counter({'y': 3, 'z': 2, 'g': 1})
 
-    '''
+    """
 
     def __init__(self, iterable=None, **kwds):
-        '''Create a new, empty Counter object.  And if given, count elements
+        """Create a new, empty Counter object.  And if given, count elements
         from an input iterable.  Or, initialize the count from another mapping
         of elements to their counts.
 
@@ -23,26 +23,26 @@ class Counter(dict):
         >>> c = Counter({'a': 4, 'b': 2})           # a new counter from a mapping
         >>> c = Counter(a=4, b=2)                   # a new counter from keyword args
 
-        '''
+        """
         self.update(iterable, **kwds)
 
     def __missing__(self, key):
         return 0
 
     def most_common(self, n=None):
-        '''List the n most common elements and their counts from the most
+        """List the n most common elements and their counts from the most
         common to the least.  If n is None, then list all element counts.
 
         >>> Counter('abracadabra').most_common(3)
         [('a', 5), ('r', 2), ('b', 2)]
 
-        '''
+        """
         if n is None:
             return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
         return nlargest(n, self.iteritems(), key=itemgetter(1))
 
     def elements(self):
-        '''Iterator over elements repeating each as many times as its count.
+        """Iterator over elements repeating each as many times as its count.
 
         >>> c = Counter('ABCABC')
         >>> sorted(c.elements())
@@ -51,7 +51,7 @@ class Counter(dict):
         If an element's count has been set to zero or is a negative number,
         elements() will ignore it.
 
-        '''
+        """
         for elem, count in self.iteritems():
             for _ in repeat(None, count):
                 yield elem
@@ -61,10 +61,11 @@ class Counter(dict):
     @classmethod
     def fromkeys(cls, iterable, v=None):
         raise NotImplementedError(
-            'Counter.fromkeys() is undefined.  Use Counter(iterable) instead.')
+            "Counter.fromkeys() is undefined.  Use Counter(iterable) instead."
+        )
 
     def update(self, iterable=None, **kwds):
-        '''Like dict.update() but add counts instead of replacing them.
+        """Like dict.update() but add counts instead of replacing them.
 
         Source can be an iterable, a dictionary, or another Counter instance.
 
@@ -75,9 +76,9 @@ class Counter(dict):
         >>> c['h']                      # four 'h' in which, witch, and watch
         4
 
-        '''
+        """
         if iterable is not None:
-            if hasattr(iterable, 'iteritems'):
+            if hasattr(iterable, "iteritems"):
                 if self:
                     self_get = self.get
                     for elem, count in iterable.iteritems():
@@ -92,19 +93,19 @@ class Counter(dict):
             self.update(kwds)
 
     def copy(self):
-        'Like dict.copy() but returns a Counter instance instead of a dict.'
+        "Like dict.copy() but returns a Counter instance instead of a dict."
         return Counter(self)
 
     def __delitem__(self, elem):
-        'Like dict.__delitem__() but does not raise KeyError for missing values.'
+        "Like dict.__delitem__() but does not raise KeyError for missing values."
         if elem in self:
             dict.__delitem__(self, elem)
 
     def __repr__(self):
         if not self:
-            return '%s()' % self.__class__.__name__
-        items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
-        return '%s({%s})' % (self.__class__.__name__, items)
+            return "%s()" % self.__class__.__name__
+        items = ", ".join(map("%r: %r".__mod__, self.most_common()))
+        return "%s({%s})" % (self.__class__.__name__, items)
 
     # Multiset-style mathematical operations discussed in:
     #       Knuth TAOCP Volume II section 4.6.3 exercise 19
@@ -116,13 +117,13 @@ class Counter(dict):
     #       c += Counter()
 
     def __add__(self, other):
-        '''Add counts from two counters.
+        """Add counts from two counters.
 
         >>> Counter('abbb') + Counter('bcc')
         Counter({'b': 4, 'c': 2, 'a': 1})
 
 
-        '''
+        """
         if not isinstance(other, Counter):
             return NotImplemented
         result = Counter()
@@ -133,12 +134,12 @@ class Counter(dict):
         return result
 
     def __sub__(self, other):
-        ''' Subtract count, but keep only results with positive counts.
+        """ Subtract count, but keep only results with positive counts.
 
         >>> Counter('abbbc') - Counter('bccd')
         Counter({'b': 2, 'a': 1})
 
-        '''
+        """
         if not isinstance(other, Counter):
             return NotImplemented
         result = Counter()
@@ -149,12 +150,12 @@ class Counter(dict):
         return result
 
     def __or__(self, other):
-        '''Union is the maximum of value in either of the input counters.
+        """Union is the maximum of value in either of the input counters.
 
         >>> Counter('abbb') | Counter('bcc')
         Counter({'b': 3, 'c': 2, 'a': 1})
 
-        '''
+        """
         if not isinstance(other, Counter):
             return NotImplemented
         _max = max
@@ -166,12 +167,12 @@ class Counter(dict):
         return result
 
     def __and__(self, other):
-        ''' Intersection is the minimum of corresponding counts.
+        """ Intersection is the minimum of corresponding counts.
 
         >>> Counter('abbb') & Counter('bcc')
         Counter({'b': 1})
 
-        '''
+        """
         if not isinstance(other, Counter):
             return NotImplemented
         _min = min
@@ -185,6 +186,7 @@ class Counter(dict):
         return result
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import doctest
+
     print(doctest.testmod())
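
This module backports collections.Counter for interpreters that predate it; on Python 2.7+ the stdlib class behaves identically, including the multiset operators exercised by the doctests above:

    from collections import Counter

    assert Counter("abbb") + Counter("bcc") == Counter({"b": 4, "c": 2, "a": 1})
    assert Counter("abbbc") - Counter("bccd") == Counter({"b": 2, "a": 1})
    assert Counter("abbb") | Counter("bcc") == Counter({"b": 3, "c": 2, "a": 1})
    assert Counter("abbb") & Counter("bcc") == Counter({"b": 1})
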
index baaf5f4547f30e12bdc45e503e3993a226607636..0f80253a3a5c81ac80c56b9cd082a521e4c8376a 100644
@@ -15,12 +15,17 @@ def initCar(hostname, port):
     x = 0
     strId = str(x)
     payload = SettingsLibrary.add_car_init_payload_template.substitute(
-        id=strId, category="category" + strId, model="model" + strId,
+        id=strId,
+        category="category" + strId,
+        model="model" + strId,
         manufacturer="manufacturer" + strId,
-        year=(2000 + x % 100))
+        year=(2000 + x % 100),
+    )
     print("Initialization payload=")
     print(payload)
-    resp = UtilLibrary.post(SettingsLibrary.getAddCarInitUrl(hostname, port), "admin", "admin", payload)
+    resp = UtilLibrary.post(
+        SettingsLibrary.getAddCarInitUrl(hostname, port), "admin", "admin", payload
+    )
     print("the response of the POST to add car=")
     print(resp)
     return resp
@@ -31,19 +36,27 @@ def addCar(hostname, port, numberOfCars, *expected):
     for x in range(1, numberOfCars + 1):
         strId = str(x)
         payload = SettingsLibrary.add_car_payload_template.substitute(
-            id=strId, category="category" + strId, model="model" + strId,
+            id=strId,
+            category="category" + strId,
+            model="model" + strId,
             manufacturer="manufacturer" + strId,
-            year=(2000 + x % 100))
+            year=(2000 + x % 100),
+        )
         print("payload formed after template substitution=")
         print(payload)
         # Send the POST request
-        resp = UtilLibrary.post(SettingsLibrary.getAddCarUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.post(
+            SettingsLibrary.getAddCarUrl(hostname, port), "admin", "admin", payload
+        )
 
         print("the response of the POST to add car=")
         print(resp)
         if expected and str(resp.status_code) not in expected:
-            raise RuntimeError('Add car failed for {}:{} with status {}'.
-                               format(hostname, port, resp.status_code))
+            raise RuntimeError(
+                "Add car failed for {}:{} with status {}".format(
+                    hostname, port, resp.status_code
+                )
+            )
 
     return resp
 
@@ -57,19 +70,24 @@ def addPerson(hostname, port, numberOfPersons, *expected):
     </note>
     """
     # FOR RPC TO WORK PROPERLY THE FIRST ENTRY SHOULD BE VIA RESTCONF
-    if (numberOfPersons == 0):
+    if numberOfPersons == 0:
         strId = str(numberOfPersons)
         payload = SettingsLibrary.add_person_payload_template.substitute(
-            personId="user" + strId, gender="unknown", age=0,
+            personId="user" + strId,
+            gender="unknown",
+            age=0,
             address=strId + "Way, Some Country, Some Zip  " + strId,
-            contactNo="some number" + strId)
+            contactNo="some number" + strId,
+        )
         # Send the POST request using RESTCONF
-        resp = UtilLibrary.nonprintpost(SettingsLibrary.getAddPersonUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.nonprintpost(
+            SettingsLibrary.getAddPersonUrl(hostname, port), "admin", "admin", payload
+        )
         return resp
 
     genderToggle = "Male"
     for x in range(1, numberOfPersons + 1):
-        if(genderToggle == "Male"):
+        if genderToggle == "Male":
             genderToggle = "Female"
         else:
             genderToggle = "Male"
@@ -77,19 +95,30 @@ def addPerson(hostname, port, numberOfPersons, *expected):
         strId = str(x)
 
         payload = SettingsLibrary.add_person_rpc_payload_template.substitute(
-            personId="user" + strId, gender=genderToggle, age=(20 + x % 100),
+            personId="user" + strId,
+            gender=genderToggle,
+            age=(20 + x % 100),
             address=strId + "Way, Some Country, Some Zip  " + str(x % 1000),
-            contactNo="some number" + strId)
+            contactNo="some number" + strId,
+        )
         # Send the POST request using RPC
-        resp = UtilLibrary.post(SettingsLibrary.getAddPersonRpcUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.post(
+            SettingsLibrary.getAddPersonRpcUrl(hostname, port),
+            "admin",
+            "admin",
+            payload,
+        )
 
         print("payload formed after template substitution=")
         print(payload)
         print("the response of the POST to add person=")
         print(resp)
         if expected and str(resp.status_code) not in expected:
-            raise RuntimeError('Add person failed for {}:{} with status {}'.
-                               format(hostname, port, resp.status_code))
+            raise RuntimeError(
+                "Add person failed for {}:{} with status {}".format(
+                    hostname, port, resp.status_code
+                )
+            )
 
     return resp
 
@@ -105,21 +134,34 @@ def addCarPerson(hostname, port, numberOfCarPersons):
     </note>
     """
     # FOR RPC TO WORK PROPERLY THE FIRST ENTRY SHOULD BE VIA RESTCONF
-    if (numberOfCarPersons == 0):
+    if numberOfCarPersons == 0:
         payload = SettingsLibrary.add_car_person_template.substitute(
-            Id=str(numberOfCarPersons), personId="user" + str(numberOfCarPersons))
+            Id=str(numberOfCarPersons), personId="user" + str(numberOfCarPersons)
+        )
         # Send the POST request REST CONF
-        resp = UtilLibrary.nonprintpost(SettingsLibrary.getAddCarPersonUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.nonprintpost(
+            SettingsLibrary.getAddCarPersonUrl(hostname, port),
+            "admin",
+            "admin",
+            payload,
+        )
 
         return resp
 
     for x in range(1, numberOfCarPersons + 1):
         strId = str(x)
 
-        payload = SettingsLibrary.add_car_person_template.substitute(Id=strId, personId="user" + strId)
+        payload = SettingsLibrary.add_car_person_template.substitute(
+            Id=strId, personId="user" + strId
+        )
 
         # Send the POST request REST CONF
-        resp = UtilLibrary.post(SettingsLibrary.getAddCarPersonUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.post(
+            SettingsLibrary.getAddCarPersonUrl(hostname, port),
+            "admin",
+            "admin",
+            payload,
+        )
 
         print("payload formed after template substitution=")
         print(payload)
@@ -146,23 +188,30 @@ def buyCar(hostname, port, numberOfCarBuyers, start=0):
     for x in range(start, start + numberOfCarBuyers):
         strId = str(x + 1)
 
-        payload = SettingsLibrary.buy_car_rpc_template.substitute(personId="user" + strId, carId=strId)
+        payload = SettingsLibrary.buy_car_rpc_template.substitute(
+            personId="user" + strId, carId=strId
+        )
 
         # Send the POST request using RPC
-        resp = UtilLibrary.post(SettingsLibrary.getBuyCarRpcUrl(hostname, port), "admin", "admin", payload)
+        resp = UtilLibrary.post(
+            SettingsLibrary.getBuyCarRpcUrl(hostname, port), "admin", "admin", payload
+        )
 
         print(resp)
         print(resp.text)
 
-        if (resp.status_code != 200):
-            raise RuntimeError('Buy car failed for {}:{} with status {}'.
-                               format(hostname, port, resp.status_code))
+        if resp.status_code != 200:
+            raise RuntimeError(
+                "Buy car failed for {}:{} with status {}".format(
+                    hostname, port, resp.status_code
+                )
+            )
 
 
 def getCars(hostname, port, ignore):
     """Uses the GET on car:cars resource to get all cars in the store using RESTCONF"""
     resp = UtilLibrary.get(SettingsLibrary.getCarsUrl(hostname, port), "admin", "admin")
-    resp.encoding = 'utf-8'
+    resp.encoding = "utf-8"
     print(resp.text)
     return resp
 
@@ -175,8 +224,10 @@ def getPersons(hostname, port, ignore):
         with personId being user0
     </note>
     """
-    resp = UtilLibrary.get(SettingsLibrary.getPersonsUrl(hostname, port), "admin", "admin")
-    resp.encoding = 'utf-8'
+    resp = UtilLibrary.get(
+        SettingsLibrary.getPersonsUrl(hostname, port), "admin", "admin"
+    )
+    resp.encoding = "utf-8"
     print(resp.text)
     return resp
 
@@ -190,8 +241,10 @@ def getCarPersonMappings(hostname, port, ignore):
         with personId being user0
     </note>
     """
-    resp = UtilLibrary.get(SettingsLibrary.getCarPersonUrl(hostname, port), "admin", "admin")
-    resp.encoding = 'utf-8'
+    resp = UtilLibrary.get(
+        SettingsLibrary.getCarPersonUrl(hostname, port), "admin", "admin"
+    )
+    resp.encoding = "utf-8"
     print(resp)
 
     return resp
@@ -213,7 +266,9 @@ def deleteAllPersons(hostname, port, ignore):
 
 def deleteAllCarsPersons(hostname, port, ignore):
     """delete all car -poeple s in the store using RESTCONF"""
-    UtilLibrary.delete(SettingsLibrary.getCarPersonUrl(hostname, port), "admin", "admin")
+    UtilLibrary.delete(
+        SettingsLibrary.getCarPersonUrl(hostname, port), "admin", "admin"
+    )
     resp = getPersons(hostname, port, ignore)
     print("Persons in store after deletion:" + str(resp))
 
@@ -273,27 +328,33 @@ def testlongevity(inputtime, port, *ips):
                     print("Pass: car person data matches")
             else:
                 print("Fail: car person addition failed")
-            time.sleep(60)    # sleep before next host starts working
+            time.sleep(60)  # sleep before next host starts working
 
 
 #
 # Usage message shown to user
 #
 
-def options():
 
-    command = 'ac=Add Car\n\t\tap=Add Person \n\t\tbc=Buy Car\n\t\tgc=Get Cars\n\t\tgp=Get Persons\n\t\t' \
-              'gcp=Get Car-Person Mappings\n\t\tdc=Delete All Cars\n\t\tdp=Delete All Persons)'
+def options():
 
-    param = '\n\t<param> is\n\t\t' \
-            'number of cars to be added if <command>=ac\n\t\t' \
-            'number of persons to be added if <command>=ap\n\t\t' \
-            'number of car buyers if <command>=bc\n\t\t'\
-            'pass 0 if <command>=gc or gp or gcp or dc or dp'\
+    command = (
+        "ac=Add Car\n\t\tap=Add Person \n\t\tbc=Buy Car\n\t\tgc=Get Cars\n\t\tgp=Get Persons\n\t\t"
+        "gcp=Get Car-Person Mappings\n\t\tdc=Delete All Cars\n\t\tdp=Delete All Persons)"
+    )
 
+    param = (
+        "\n\t<param> is\n\t\t"
+        "number of cars to be added if <command>=ac\n\t\t"
+        "number of persons to be added if <command>=ap\n\t\t"
+        "number of car buyers if <command>=bc\n\t\t"
+        "pass 0 if <command>=gc or gp or gcp or dc or dp"
+    )
 
-    usageString = 'usage: python crud <ipaddress> <command> <param>\nwhere\n\t<ipaddress> = ODL server ip address' \
-                  '\n\t<command> = any of the following commands \n\t\t'
+    usageString = (
+        "usage: python crud <ipaddress> <command> <param>\nwhere\n\t<ipaddress> = ODL server ip address"
+        "\n\t<command> = any of the following commands \n\t\t"
+    )
 
     usageString = usageString + command + param
 
@@ -304,14 +365,23 @@ def options():
 # entry point for command executions
 #
 
+
 def main():
     if len(sys.argv) < 4:
         options()
         quit(0)
     SettingsLibrary.hostname = sys.argv[1]
-    SettingsLibrary.port = '8181'
-    call = dict(ac=addCar, ap=addPerson, bc=buyCar,
-                gc=getCars, gp=getPersons, gcp=getCarPersonMappings, dc=deleteAllCars, dp=deleteAllPersons)
+    SettingsLibrary.port = "8181"
+    call = dict(
+        ac=addCar,
+        ap=addPerson,
+        bc=buyCar,
+        gc=getCars,
+        gp=getPersons,
+        gcp=getCarPersonMappings,
+        dc=deleteAllCars,
+        dp=deleteAllPersons,
+    )
 
     # FOR RPC TO WORK PROPERLY THE FIRST PERSON SHOULD BE ADDED VIA RESTCONF
     addPerson(SettingsLibrary.hostname, SettingsLibrary.port, 0)
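
Every hunk in this patch is mechanical black output: single quotes become double quotes, and calls longer than the default 88-character limit are exploded one argument per line with a trailing comma. A hedged sketch of reproducing that with black's Python API (present in the 2020-era releases; exact output can vary by version):

    import black

    print(black.format_str("x = {'a': 'b'}\n", mode=black.FileMode()))
    # -> x = {"a": "b"}
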
index bc3d1e3e22baacebe3635222084d0603df32b60c..808c68f624421269770a512861f81b0735a5d9b9 100755
@@ -34,33 +34,35 @@ def docker_create(docker_image_name, passed_args_dict=None):
     logger.info(passed_args_dict)
     docker_client = docker_get_client()
 
-    default_args_dict = dict(command=None,
-                             hostname=None,
-                             user=None,
-                             detach=False,
-                             stdin_open=False,
-                             tty=False,
-                             mem_limit=0,
-                             ports=None,
-                             environment=None,
-                             dns=None,
-                             volumes=None,
-                             volumes_from=None,
-                             network_disabled=False,
-                             name=None,
-                             entrypoint=None,
-                             cpu_shares=None,
-                             working_dir=None,
-                             domainname=None,
-                             memswap_limit=0,
-                             cpuset=None,
-                             host_config=None
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_create")
+    default_args_dict = dict(
+        command=None,
+        hostname=None,
+        user=None,
+        detach=False,
+        stdin_open=False,
+        tty=False,
+        mem_limit=0,
+        ports=None,
+        environment=None,
+        dns=None,
+        volumes=None,
+        volumes_from=None,
+        network_disabled=False,
+        name=None,
+        entrypoint=None,
+        cpu_shares=None,
+        working_dir=None,
+        domainname=None,
+        memswap_limit=0,
+        cpuset=None,
+        host_config=None,
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_create"
+    )
 
     docker_client.images(name=docker_image_name)
-    docker_uid_dict = docker_client\
-        .create_container(docker_image_name, **args_dict)
+    docker_uid_dict = docker_client.create_container(docker_image_name, **args_dict)
     docker_info = docker_client.inspect_container(docker_uid_dict.get("Id"))
     return docker_info
 
@@ -85,28 +87,30 @@ def docker_start(docker_name, passed_args_dict=None):
     logger.info(passed_args_dict)
     docker_client = docker_get_client()
 
-    default_args_dict = dict(binds=None,
-                             port_bindings=None,
-                             lxc_conf=None,
-                             publish_all_ports=False,
-                             links=None,
-                             privileged=False,
-                             dns=None,
-                             dns_search=None,
-                             volumes_from=None,
-                             network_mode=None,
-                             restart_policy=None,
-                             cap_add=None,
-                             cap_drop=None,
-                             devices=None,
-                             extra_hosts=None
-                             )
+    default_args_dict = dict(
+        binds=None,
+        port_bindings=None,
+        lxc_conf=None,
+        publish_all_ports=False,
+        links=None,
+        privileged=False,
+        dns=None,
+        dns_search=None,
+        volumes_from=None,
+        network_mode=None,
+        restart_policy=None,
+        cap_add=None,
+        cap_drop=None,
+        devices=None,
+        extra_hosts=None,
+    )
     args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_start")
 
     docker_client.start(docker_name, **args_dict)
 
-    if "True" in str(docker_client.inspect_container(docker_name)
-                     .get("State").get("Running")):
+    if "True" in str(
+        docker_client.inspect_container(docker_name).get("State").get("Running")
+    ):
         logger.info("Started docker %s successfully" % docker_name)
         return True
     else:
@@ -131,11 +135,10 @@ def docker_remove(docker_name, passed_args_dict=None):
     logger.info(passed_args_dict)
     docker_client = docker_get_client()
 
-    default_args_dict = dict(v=False,
-                             link=False,
-                             force=False
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_remove")
+    default_args_dict = dict(v=False, link=False, force=False)
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_remove"
+    )
 
     docker_client.remove_container(docker_name, **args_dict)
     docker_containers = docker_client.containers(all=True)
@@ -164,8 +167,9 @@ def docker_stop(docker_name, timeout=10):
 
     docker_client.stop(docker_name, timeout)
 
-    if "False" in str(docker_client.inspect_container(docker_name)
-                      .get("State").get("Running")):
+    if "False" in str(
+        docker_client.inspect_container(docker_name).get("State").get("Running")
+    ):
         logger.info("Stopped docker %s successfully" % docker_name)
         return True
     else:
@@ -189,13 +193,12 @@ def docker_return_logs(docker_name, passed_args_dict=None):
     logger.info(passed_args_dict)
     docker_client = docker_get_client()
 
-    default_args_dict = dict(stdout=True,
-                             stderr=True,
-                             stream=False,
-                             timestamps=False,
-                             tail='all'
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_return_logs")
+    default_args_dict = dict(
+        stdout=True, stderr=True, stream=False, timestamps=False, tail="all"
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_return_logs"
+    )
 
     return docker_client.logs(docker_name, **args_dict)
 
@@ -223,13 +226,12 @@ def docker_execute(docker_name, cmd, passed_args_dict=None):
     logger.info(passed_args_dict)
     docker_client = docker_get_client()
 
-    default_args_dict = dict(detach=False,
-                             stdout=True,
-                             stderr=True,
-                             stream=False,
-                             tty=False
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_execute")
+    default_args_dict = dict(
+        detach=False, stdout=True, stderr=True, stream=False, tty=False
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_execute"
+    )
 
     return docker_client.execute(docker_name, cmd, **args_dict)
 
@@ -246,9 +248,11 @@ def docker_get_ip4(docker_name):
     """
     logger.info("Getting IP of docker %s" % docker_name)
     docker_client = docker_get_client()
-    return str(docker_client.inspect_container(docker_name)
-               .get("NetworkSettings")
-               .get("IPAddress"))
+    return str(
+        docker_client.inspect_container(docker_name)
+        .get("NetworkSettings")
+        .get("IPAddress")
+    )
 
 
 def docker_ping(docker_name, ip, count=3):
@@ -282,23 +286,24 @@ def docker_list_containers(passed_args_dict=None):
     logger.info("Listing docker containers")
     logger.info(passed_args_dict)
 
-    default_args_dict = dict(quiet=True,
-                             all=True,
-                             trunc=True,
-                             latest=False,
-                             since=None,
-                             before=None,
-                             limit=-1,
-                             size=False,
-                             filters=None
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict,
-                                    "docker_list_containers")
-
-    return docker.Client(
-        base_url='unix://var/run/docker.sock',
-        timeout=10)\
-        .containers(**args_dict)
+    default_args_dict = dict(
+        quiet=True,
+        all=True,
+        trunc=True,
+        latest=False,
+        since=None,
+        before=None,
+        limit=-1,
+        size=False,
+        filters=None,
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_list_containers"
+    )
+
+    return docker.Client(base_url="unix://var/run/docker.sock", timeout=10).containers(
+        **args_dict
+    )
 
 
 def docker_create_host_config(passed_args_dict):
@@ -316,24 +321,26 @@ def docker_create_host_config(passed_args_dict):
     """
     logger.info("Creating host config.")
 
-    default_args_dict = dict(binds=None,
-                             port_bindings=None,
-                             lxc_conf=None,
-                             publish_all_ports=False,
-                             links=None,
-                             privileged=False,
-                             dns=None,
-                             dns_search=None,
-                             volumes_from=None,
-                             network_mode=None,
-                             restart_policy=None,
-                             cap_add=None,
-                             cap_drop=None,
-                             devices=None,
-                             extra_hosts=None
-                             )
-    args_dict = docker_process_args(passed_args_dict, default_args_dict,
-                                    "docker_create_host_config")
+    default_args_dict = dict(
+        binds=None,
+        port_bindings=None,
+        lxc_conf=None,
+        publish_all_ports=False,
+        links=None,
+        privileged=False,
+        dns=None,
+        dns_search=None,
+        volumes_from=None,
+        network_mode=None,
+        restart_policy=None,
+        cap_add=None,
+        cap_drop=None,
+        devices=None,
+        extra_hosts=None,
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_create_host_config"
+    )
 
     return docker.utils.create_host_config(**args_dict)
 
@@ -380,9 +387,10 @@ def docker_get_client(*passed_args_dict):
     Returns:
         :returns obj: returns docker-py client object.
     """
-    default_args_dict = dict(base_url="unix://var/run/docker.sock",
-                             version=None,
-                             timeout=10,
-                             tls=False)
-    args_dict = docker_process_args(passed_args_dict, default_args_dict, "docker_get_client")
+    default_args_dict = dict(
+        base_url="unix://var/run/docker.sock", version=None, timeout=10, tls=False
+    )
+    args_dict = docker_process_args(
+        passed_args_dict, default_args_dict, "docker_get_client"
+    )
     return docker.Client(**args_dict)
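
All of the wrappers in this module funnel keyword arguments through
docker_process_args, which overlays caller-supplied values on a dict of
defaults before splatting the result into docker-py. A minimal sketch of that
merge semantics, assuming plain dicts (the real helper presumably also
validates keys for the named caller):

    def merge_args(passed_args_dict, default_args_dict):
        # Defaults first; caller-supplied keys win. None means "no overrides".
        merged = dict(default_args_dict)
        merged.update(passed_args_dict or {})
        return merged

    defaults = dict(v=False, link=False, force=False)
    print(merge_args({"force": True}, defaults))
    # -> {'v': False, 'link': False, 'force': True}
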
index 8be374e4ff92466e1f88eb336ab62e46ac1cce10..639fe0c6abac7ddf26108ebeb12bfe41513c2c7a 100644 (file)
@@ -3,6 +3,7 @@ New CLI for mininet which should dynamically add and delete switches from networ
 """
 
 import cmd
+
 # import json
 # import logging
 
@@ -35,7 +36,7 @@ class DynamicMininet(cmd.Cmd):
     Note: Do not mix scenarios
     """
 
-    prompt = 'mininet> '
+    prompt = "mininet> "
 
     def __init__(self):
         cmd.Cmd.__init__(self)
@@ -53,25 +54,31 @@ class DynamicMininet(cmd.Cmd):
             :param num: initial number of switches in the topology
         """
         if self._running:
-            print('Mininet topology is already active')
+            print("Mininet topology is already active")
             return
         cntl, numsw = line.split()
         self._topo = mininet.topo.Topo()
         for _ in range(int(numsw)):
             self._lid += 1
-            self._topo.addSwitch('s{0}'.format(self._lid))
-        controller = mininet.util.customConstructor({'remote': RemoteController}, 'remote,ip={0}'.format(cntl))
-        switch = mininet.util.customConstructor({'ovsk': OVSKernelSwitch}, 'ovsk,protocols=OpenFlow13')
-        self._net = mininet.net.Mininet(topo=self._topo, switch=switch, controller=controller)
+            self._topo.addSwitch("s{0}".format(self._lid))
+        controller = mininet.util.customConstructor(
+            {"remote": RemoteController}, "remote,ip={0}".format(cntl)
+        )
+        switch = mininet.util.customConstructor(
+            {"ovsk": OVSKernelSwitch}, "ovsk,protocols=OpenFlow13"
+        )
+        self._net = mininet.net.Mininet(
+            topo=self._topo, switch=switch, controller=controller
+        )
         self._net.start()
         self._running = True
 
     def help_start(self):
         """Provide help message for start command"""
-        print('Starts mininet')
-        print('Usage: start <controller_ip> <num>')
-        print('\tcontroller_ip - controllers ip or host name')
-        print('\tnum           - number of switches at start')
+        print("Starts mininet")
+        print("Usage: start <controller_ip> <num>")
+        print("\tcontroller_ip - controllers ip or host name")
+        print("\tnum           - number of switches at start")
 
     def do_start_with_cluster(self, line):
         """Starts mininet network with initial number of switches
@@ -81,17 +88,21 @@ class DynamicMininet(cmd.Cmd):
                                    e.g.  1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
         """
         if self._running:
-            print('Mininet topology is already active')
+            print("Mininet topology is already active")
             return
-        cntls = line.split(',')
+        cntls = line.split(",")
 
         self._topo = mininet.topo.SingleSwitchTopo()
-        switch = mininet.util.customConstructor({'ovsk': OVSKernelSwitch}, 'ovsk,protocols=OpenFlow13')
+        switch = mininet.util.customConstructor(
+            {"ovsk": OVSKernelSwitch}, "ovsk,protocols=OpenFlow13"
+        )
         self._net = mininet.net.Mininet(switch=switch)
 
         controllers = []
         for i, cntl_ip in enumerate(cntls):
-            cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
+            cnt = self._net.addController(
+                "c{0}".format(i), controller=RemoteController, ip=cntl_ip, port=6633
+            )
             controllers.append(cnt)
             print("contrller {0} created".format(cnt))
 
@@ -101,9 +112,9 @@ class DynamicMininet(cmd.Cmd):
 
     def help_start_with_cluster(self):
         """Provide help message for start_with_cluster command"""
-        print('Starts mininet with one switch')
-        print('Usage: start <controller_ips>')
-        print('\tcontroller_ips - comma separated list of controllers ip or host names')
+        print("Starts mininet with one switch")
+        print("Usage: start <controller_ips>")
+        print("\tcontroller_ips - comma separated list of controllers ip or host names")
 
     def do_start_switches_with_cluster(self, line):
         """Starts mininet network with initial number of switches
@@ -114,18 +125,22 @@ class DynamicMininet(cmd.Cmd):
                                    e.g.  1.1.1.1,2.2.2.2,3.3.3.3 (no spaces)
         """
         if self._running:
-            print('Mininet topology is already active')
+            print("Mininet topology is already active")
             return
         num, contls = line.split()
-        cntls = contls.split(',')
+        cntls = contls.split(",")
 
         self._topo = mininet.topo.LinearTopo(int(num))
-        switch = mininet.util.customConstructor({'ovsk': OVSKernelSwitch}, 'ovsk,protocols=OpenFlow13')
+        switch = mininet.util.customConstructor(
+            {"ovsk": OVSKernelSwitch}, "ovsk,protocols=OpenFlow13"
+        )
         self._net = mininet.net.Mininet(switch=switch)
 
         controllers = []
         for i, cntl_ip in enumerate(cntls):
-            cnt = self._net.addController('c{0}'.format(i), controller=RemoteController, ip=cntl_ip, port=6633)
+            cnt = self._net.addController(
+                "c{0}".format(i), controller=RemoteController, ip=cntl_ip, port=6633
+            )
             controllers.append(cnt)
             print("contrller {0} created".format(cnt))
 
@@ -135,10 +150,10 @@ class DynamicMininet(cmd.Cmd):
 
     def help_start_switches_with_cluster(self):
         """Provide help message for start_with_cluster command"""
-        print('Starts mininet with one switch')
-        print('Usage: start <swnr> <controller_ips>')
-        print('\tswnt - number of switches in topology')
-        print('\tcontroller_ips - comma separated list of controllers ip or host names')
+        print("Starts mininet with one switch")
+        print("Usage: start <swnr> <controller_ips>")
+        print("\tswnt - number of switches in topology")
+        print("\tcontroller_ips - comma separated list of controllers ip or host names")
 
     def do_add_switch(self, line):
         """Adds one switch to the network
@@ -148,17 +163,17 @@ class DynamicMininet(cmd.Cmd):
         if not self._running:
             raise RuntimeError('Network not running, use command "start" first')
         self._lid += 1
-        sname = 's{0}'.format(self._lid)
+        sname = "s{0}".format(self._lid)
         self._topo.addSwitch(sname)
         self._net.addSwitch(sname, **self._topo.nodeInfo(sname))
         s = self._net.get(sname)
-        c = self._net.get('c0')
+        c = self._net.get("c0")
         s.start([c])
 
     def help_add_switch(self):
         """Provide help message for add_switch command"""
-        print('Adds one sinle switch to the running topology')
-        print('Usage: add_switch')
+        print("Adds one sinle switch to the running topology")
+        print("Usage: add_switch")
 
     def do_add_switches(self, line):
         """Adds switches to the network
@@ -170,9 +185,9 @@ class DynamicMininet(cmd.Cmd):
 
     def help_add_switches(self):
         """Provide help message for add_switch command"""
-        print('Adds one sinle switch to the running topology')
-        print('Usage: add_switches <num>')
-        print('\tnum - number of switches tp be added')
+        print("Adds one sinle switch to the running topology")
+        print("Usage: add_switches <num>")
+        print("\tnum - number of switches tp be added")
 
     def do_exit(self, line):
         """Stops mininet"""
@@ -183,8 +198,8 @@ class DynamicMininet(cmd.Cmd):
 
     def help_exit(self):
         """Provide help message for exit command"""
-        print('Exit mininet cli')
-        print('Usage: exit')
+        print("Exit mininet cli")
+        print("Usage: exit")
 
     def do_sh(self, line):
         """Run an external shell command
@@ -195,14 +210,14 @@ class DynamicMininet(cmd.Cmd):
 
     def help_sh(self, line):
         """Provide help message for sh command"""
-        print('Executes given commandAdds one sinle switch to the running topology')
-        print('Usage: sh <line>')
-        print('\tline - command to be executed(e.g. ps -e')
+        print("Executes given commandAdds one sinle switch to the running topology")
+        print("Usage: sh <line>")
+        print("\tline - command to be executed(e.g. ps -e")
 
     def emptyline(self):
         pass
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     dynamic_mininet_cli = DynamicMininet()
     dynamic_mininet_cli.cmdloop()
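
For reference, a short sketch of driving this CLI programmatically instead of
interactively; cmd.Cmd.onecmd feeds a single command line to the interpreter.
It assumes mininet, Open vSwitch, and a reachable controller (and typically
root privileges):

    cli = DynamicMininet()
    cli.onecmd("start 10.0.0.5 2")  # controller IP, two initial switches
    cli.onecmd("add_switch")        # grow the running topology by one
    cli.onecmd("exit")              # stop the network
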
index cd884d22971070af9bdd796ea8910646e886704a..43edf8455aaa3fb48a68b2a566f07ad417068554 100644 (file)
@@ -3,27 +3,31 @@ Library for dynamic flow construction.
 Authors: james.luhrsen@hp.com
 Updated: 2014-08-29
 """
-'''
+"""
 xmltodict and json libs not needed at this point, but may be useful in
 the future.
-'''
+"""
 
 # bare bones xml for building a flow xml for flow:inventory
-flow_xml_skeleton = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' +      \
-                    '<flow xmlns="urn:opendaylight:flow:inventory">' +      \
-                    '<instructions></instructions>' +      \
-                    '<match></match>' +      \
-                    '</flow>'
+flow_xml_skeleton = (
+    '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
+    + '<flow xmlns="urn:opendaylight:flow:inventory">'
+    + "<instructions></instructions>"
+    + "<match></match>"
+    + "</flow>"
+)
 
-input_xml_skeleton = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' +      \
-                     '<input xmlns="urn:opendaylight:flow:service">' +      \
-                     '</input>'
+input_xml_skeleton = (
+    '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
+    + '<input xmlns="urn:opendaylight:flow:service">'
+    + "</input>"
+)
 
 
 class Flow:
-    '''
+    """
     Flow class for creating and interacting with OpenFlow flows
-    '''
+    """
 
     strict = "false"
     instruction_xmls = ""
@@ -43,41 +47,41 @@ class Flow:
     json = ""
 
     def set_field(self, field, value):
-        '''
+        """
            allows for generically setting any attribute in this
            class based on the 'field' passed in.  In the future,
            adding a new attribute only requires that single line
            addition.  no need for additional setter.
-        '''
+        """
         setattr(self, field, value)
 
 
 def Make_Inventory_Flow():
-    '''
+    """
         Robot Keyword to create and return an instance of the Flow
         class.
-    '''
+    """
     flow = Flow()
     flow.xml = flow_xml_skeleton
     return flow
 
 
 def Make_Service_Flow():
-    '''
+    """
         Robot Keyword to create an input XML that can be used to
         directly send to flow:service for things like accessing
         the remove-flow RPC via restconf
-    '''
+    """
     flow = Flow()
     flow.xml = input_xml_skeleton
     return flow
 
 
 def Set_Flow_Field(flow, field, value):
-    '''
+    """
         Robot Keyword to allow the modification (setting) of the
         flow object attributes
-    '''
+    """
     flow.set_field(field, value)
     return flow
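
A minimal sketch of the Robot-facing helpers above used from plain Python;
"strict" is one of the attributes declared on the Flow class in this hunk:

    flow = Make_Inventory_Flow()
    flow = Set_Flow_Field(flow, "strict", "true")
    print(flow.xml)     # the bare-bones flow:inventory skeleton
    print(flow.strict)  # "true"
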
 
index 4dbc7ef94d2d7f7bdc84a8960037114273b0bbfa..7721a1285a08b72dda72851c877f4f15c7458d5f 100644 (file)
@@ -12,7 +12,7 @@ def check_iso8601_datetime_younger_then_limit(status_timestamp_raw, limit):
     :param limit: datetime value - status must be younger than this
     """
 
-    logger.debug('limit:{0}'.format(limit))
+    logger.debug("limit:{0}".format(limit))
 
     timestamp_raw = status_timestamp_raw
     # 2016-11-23T13:25:00.733+01:00
@@ -21,9 +21,11 @@ def check_iso8601_datetime_younger_then_limit(status_timestamp_raw, limit):
     limit_tstamp = limit_tstamp.replace(tzinfo=status_tstamp.tzinfo)
 
     if status_tstamp <= limit_tstamp:
-        logger.info('status stamp --> {0}'.format(status_tstamp))
-        logger.info('limit        --> {0}'.format(limit_tstamp))
-        raise ExecutionFailed('received status is not up-to-date: {0}'.format(status_tstamp))
+        logger.info("status stamp --> {0}".format(status_tstamp))
+        logger.info("limit        --> {0}".format(limit_tstamp))
+        raise ExecutionFailed(
+            "received status is not up-to-date: {0}".format(status_tstamp)
+        )
 
 
 def replace_ise_source_address(ise_source_json, new_target):
@@ -33,7 +35,9 @@ def replace_ise_source_address(ise_source_json, new_target):
     :param ise_source_json: ise source configuration as json
     :param new_target: current ise server url
     """
-    ise_source_json['ise-source-config']['connection-config']['ise-rest-url'] = new_target
+    ise_source_json["ise-source-config"]["connection-config"][
+        "ise-rest-url"
+    ] = new_target
 
 
 def remove_endpoint_timestamp(endpoint_json):
@@ -43,10 +47,12 @@ def remove_endpoint_timestamp(endpoint_json):
     :return: plain text without timestamp
     """
     try:
-        for address_endpoint in endpoint_json['endpoints']['address-endpoints']['address-endpoint']:
-            del address_endpoint['timestamp']
+        for address_endpoint in endpoint_json["endpoints"]["address-endpoints"][
+            "address-endpoint"
+        ]:
+            del address_endpoint["timestamp"]
     except KeyError:
-        msg = 'No endpoint present - can not wipe timestamp'
+        msg = "No endpoint present - can not wipe timestamp"
         logger.debug(msg)
         raise ExecutionFailed(msg)
 
@@ -61,10 +67,10 @@ def resolve_sxp_node_is_enabled(sxp_node_json):
     """
     enabled = None
     try:
-        for node in sxp_node_json['node']:
-            enabled = node['sxp-node:enabled']
+        for node in sxp_node_json["node"]:
+            enabled = node["sxp-node:enabled"]
     except KeyError:
-        msg = 'No sxp node content present - can not read value of enabled'
+        msg = "No sxp node content present - can not read value of enabled"
         logger.debug(msg)
         raise ExecutionFailed(msg)
 
@@ -80,11 +86,11 @@ def replace_netconf_node_host(netconf_node_json, node_name, host_value):
     :return: plain text with replaced host value
     """
     try:
-        for node in netconf_node_json['node']:
-            node['netconf-node-topology:host'] = host_value
-            node['node-id'] = node_name
+        for node in netconf_node_json["node"]:
+            node["netconf-node-topology:host"] = host_value
+            node["node-id"] = node_name
     except KeyError:
-        msg = 'No host found in given netconf node config'
+        msg = "No host found in given netconf node config"
         logger.debug(msg)
         raise ExecutionFailed(msg)
 
@@ -99,13 +105,15 @@ def replace_ip_mgmt_address_in_forwarder(sf_forwarders_json, ip_mgmt_map):
     :return: plain sfc forwarders with replaced ip-mgmt-addresses
     """
     try:
-        for sff in sf_forwarders_json['service-function-forwarders']['service-function-forwarder']:
-            sff_name = sff['name']
+        for sff in sf_forwarders_json["service-function-forwarders"][
+            "service-function-forwarder"
+        ]:
+            sff_name = sff["name"]
             if sff_name in ip_mgmt_map:
-                sff['ip-mgmt-address'] = ip_mgmt_map[sff_name]
+                sff["ip-mgmt-address"] = ip_mgmt_map[sff_name]
 
     except KeyError:
-        msg = 'Expected sff not found in given config'
+        msg = "Expected sff not found in given config"
         logger.debug(msg)
         raise ExecutionFailed(msg)
 
@@ -120,9 +128,9 @@ def replace_renderer_policy_version(renderer_policy_json, next_version):
     :return: plain renderer policy with replaced version
     """
     try:
-        renderer_policy_json['renderer-policy']['version'] = next_version
+        renderer_policy_json["renderer-policy"]["version"] = next_version
     except KeyError:
-        msg = 'Expected version element not found in given renderer-policy'
+        msg = "Expected version element not found in given renderer-policy"
         logger.debug(msg)
         raise ExecutionFailed(msg)
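
As an illustration of replace_netconf_node_host above, assuming a hand-built
topology fragment (the suites normally feed it JSON parsed from RESTCONF):

    node_json = {
        "node": [{"node-id": "old-node", "netconf-node-topology:host": "10.0.0.1"}]
    }
    replace_netconf_node_host(node_json, "sxp-node", "192.0.2.7")
    print(node_json["node"][0]["node-id"])                     # sxp-node
    print(node_json["node"][0]["netconf-node-topology:host"])  # 192.0.2.7
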
 
index a513360c288b622f29e5436a4b1afe541d521969..26a0988ee53f5a85ef55ebdf086e857af8ca8881 100644 (file)
@@ -6,7 +6,7 @@ op_provision = ":8181/restconf/operations/onem2m:onem2m-cse-provisioning"
 op_tree = ":8181/restconf/operational/onem2m:onem2m-resource-tree"
 op_cleanup = ":8181/restconf/operations/onem2m:onem2m-cleanup-store"
 
-cse_payload = '''
+cse_payload = """
 {    "input": {
         "onem2m-primitive": [
            {
@@ -20,55 +20,70 @@ cse_payload = '''
         ]
     }
 }
-'''
+"""
 
-resourcepayload = '''
+resourcepayload = """
 {
     %s
 }
-'''
+"""
 
-ae_payload = '''
+ae_payload = """
 {
     "m2m:ae":{%s}
 }
-'''
+"""
 
-con_payload = '''
+con_payload = """
 {
     "m2m:cnt":{%s}
 }
-'''
+"""
 
-cin_payload = '''
+cin_payload = """
 {
    "m2m:cin":{%s}
 }
-'''
+"""
 
-sub_payload = '''
+sub_payload = """
 {
     "m2m:sub":{%s}
 }
-'''
+"""
 
-acp_payload = '''
+acp_payload = """
 {
     "m2m:acp":{%s}
 }
-'''
+"""
 
-nod_payload = '''
+nod_payload = """
 {
     "m2m:nod":{%s}
 }
-'''
-
-resources = {"m2m:ae", "m2m:cnt", "m2m:cin", "m2m:sub",
-             "m2m:acp", "m2m:nod", "m2m:grp", "m2m:cb", "ch"}
+"""
+
+resources = {
+    "m2m:ae",
+    "m2m:cnt",
+    "m2m:cin",
+    "m2m:sub",
+    "m2m:acp",
+    "m2m:nod",
+    "m2m:grp",
+    "m2m:cb",
+    "ch",
+}
 
-payload_map = {1: acp_payload, 2: ae_payload, 3: con_payload,
-               4: cin_payload, 14: nod_payload, 23: sub_payload}
+payload_map = {
+    1: acp_payload,
+    2: ae_payload,
+    3: con_payload,
+    4: cin_payload,
+    14: nod_payload,
+    23: sub_payload,
+}
 
 
 def find_key(response, key, first=None):
@@ -154,7 +169,7 @@ def status(response):
     """Return the protocol status code in the response."""
     try:
         return response.status_code
-    except(TypeError, AttributeError):
+    except (TypeError, AttributeError):
         return None
 
 
@@ -162,15 +177,15 @@ def headers(response):
     """Return the protocol headers in the response."""
     try:
         return response.headers
-    except(TypeError, AttributeError):
+    except (TypeError, AttributeError):
         return None
 
 
 def error(response):
     """Return the error string in the response."""
     try:
-        return response.json()['error']
-    except(TypeError, AttributeError):
+        return response.json()["error"]
+    except (TypeError, AttributeError):
         return None
 
 
@@ -186,31 +201,37 @@ class connect:
 
     """Create the connection."""
 
-    def __init__(self, server="localhost", base='InCSE1',
-                 auth=('admin', 'admin'), protocol="http"):
+    def __init__(
+        self,
+        server="localhost",
+        base="InCSE1",
+        auth=("admin", "admin"),
+        protocol="http",
+    ):
         """Connect to a IoTDM server."""
         self.session = requests.Session()
         self.session.auth = auth
-        self.session.headers.update({'content-type': 'application/json'})
+        self.session.headers.update({"content-type": "application/json"})
         self.timeout = 5
         self.payload = cse_payload % (base)
         self.headers = {
             # Admittedly these are "magic values" but are required
             # and until a proper defaulting initializer is in place
             # are hard-coded.
-            'content-type': 'application/vnd.onem2m-res+json',
-            'X-M2M-Origin': 'iotdm-robot-tests',
-            'X-M2M-RI': '12345',
-            'X-M2M-OT': 'NOW'
+            "content-type": "application/vnd.onem2m-res+json",
+            "X-M2M-Origin": "iotdm-robot-tests",
+            "X-M2M-RI": "12345",
+            "X-M2M-OT": "NOW",
         }
         self.server = "%s://" % (protocol) + server
         self.url = self.server + op_provision
         self.response = self.session.post(
-            self.url, data=self.payload, timeout=self.timeout)
+            self.url, data=self.payload, timeout=self.timeout
+        )
 
     def modify_headers_origin(self, new_origin):
         """Modify the headers to test ACP."""
-        self.headers['X-M2M-Origin'] = new_origin
+        self.headers["X-M2M-Origin"] = new_origin
 
     def create(self, parent, restype, attr=None):
         """Create certain resource with attributes under parent URI.
@@ -225,16 +246,18 @@ class connect:
         restype = int(restype)
         payload = payload_map[restype]
         payload = payload % (attr)
-        self.headers['content-type'] = 'application/\
-            vnd.onem2m-res+json;ty=%s' % (restype)
+        self.headers["content-type"] = (
+            "application/\
+            vnd.onem2m-res+json;ty=%s"
+            % (restype)
+        )
         parent = normalize(parent)
-        self.url = self.server + ":8282/%s?&rcn=1" % (
-            parent)
+        self.url = self.server + ":8282/%s?&rcn=1" % (parent)
         self.response = self.session.post(
-            self.url, payload, timeout=self.timeout, headers=self.headers)
+            self.url, payload, timeout=self.timeout, headers=self.headers
+        )
 
-    def create_with_command(self, parent, restype,
-                            command, attr=None):
+    def create_with_command(self, parent, restype, command, attr=None):
         """Create certain resource with attributes under parent URI.
 
         Args:
@@ -248,13 +271,16 @@ class connect:
         restype = int(restype)
         payload = payload_map[restype]
         payload = payload % (attr)
-        self.headers['content-type'] = 'application/\
-            vnd.onem2m-res+json;ty=%s' % (restype)
+        self.headers["content-type"] = (
+            "application/\
+            vnd.onem2m-res+json;ty=%s"
+            % (restype)
+        )
         parent = normalize(parent)
-        self.url = self.server + ":8282/%s?%s" % (
-            parent, command)
+        self.url = self.server + ":8282/%s?%s" % (parent, command)
         self.response = self.session.post(
-            self.url, payload, timeout=self.timeout, headers=self.headers)
+            self.url, payload, timeout=self.timeout, headers=self.headers
+        )
 
     def retrieve(self, resource_uri):
         """Retrieve resource using resource_uri."""
@@ -262,8 +288,8 @@ class connect:
             return None
         resource_uri = normalize(resource_uri)
         self.url = self.server + ":8282/%s?rcn=5" % (resource_uri)
-        self.headers['X-M2M-NM'] = None
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
+        self.headers["X-M2M-NM"] = None
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
         self.response = self.session.get(
             self.url, timeout=self.timeout, headers=self.headers
         )
@@ -276,8 +302,8 @@ class connect:
             return None
         resource_uri = normalize(resource_uri)
         self.url = self.server + ":8282/%s?%s" % (resource_uri, command)
-        self.headers['X-M2M-NM'] = None
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
+        self.headers["X-M2M-NM"] = None
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
         self.response = self.session.get(
             self.url, timeout=self.timeout, headers=self.headers
         )
@@ -290,13 +316,13 @@ class connect:
         restype = int(restype)
         payload = payload_map[restype]
         payload = payload % (attr)
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
         self.url = self.server + ":8282/%s" % (resource_uri)
         self.response = self.session.put(
-            self.url, payload, timeout=self.timeout, headers=self.headers)
+            self.url, payload, timeout=self.timeout, headers=self.headers
+        )
 
-    def update_with_command(self, resource_uri, restype,
-                            command, attr=None):
+    def update_with_command(self, resource_uri, restype, command, attr=None):
         """Update resource at resource_uri with new attributes."""
         if resource_uri is None:
             return None
@@ -304,10 +330,11 @@ class connect:
         restype = int(restype)
         payload = payload_map[restype]
         payload = payload % (attr)
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
         self.url = self.server + ":8282/%s?%s" % (resource_uri, command)
         self.response = self.session.put(
-            self.url, payload, timeout=self.timeout, headers=self.headers)
+            self.url, payload, timeout=self.timeout, headers=self.headers
+        )
 
     def delete(self, resource_uri):
         """Delete the resource at the resource_uri."""
@@ -315,10 +342,11 @@ class connect:
             return None
         resource_uri = normalize(resource_uri)
         self.url = self.server + ":8282/%s" % (resource_uri)
-        self.headers['X-M2M-NM'] = None
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
-        self.response = self.session.delete(self.url, timeout=self.timeout,
-                                            headers=self.headers)
+        self.headers["X-M2M-NM"] = None
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
+        self.response = self.session.delete(
+            self.url, timeout=self.timeout, headers=self.headers
+        )
 
     def delete_with_command(self, resource_uri, command):
         """Delete the resource at the resource_uri."""
@@ -326,10 +354,11 @@ class connect:
             return None
         resource_uri = normalize(resource_uri)
         self.url = self.server + ":8282/%s?%s" % (resource_uri, command)
-        self.headers['X-M2M-NM'] = None
-        self.headers['content-type'] = 'application/vnd.onem2m-res+json'
-        self.response = self.session.delete(self.url, timeout=self.timeout,
-                                            headers=self.headers)
+        self.headers["X-M2M-NM"] = None
+        self.headers["content-type"] = "application/vnd.onem2m-res+json"
+        self.response = self.session.delete(
+            self.url, timeout=self.timeout, headers=self.headers
+        )
 
     def tree(self):
         """Get the resource tree."""
index 617c48280758af5fa333f88a39cb999788c16962..70ddfc698f8f000d5ec82111c88a572f795475a5 100644 (file)
@@ -49,6 +49,7 @@ class IoTDataEncoder(object):
 
 class IoTDataEncodeError(Exception):
     """IoTData encoding error"""
+
     pass
 
 
@@ -62,4 +63,5 @@ class IoTDataDecoder(object):
 
 class IoTDataDecodeError(Exception):
     """Protocol message decoding error"""
+
     pass
index 02f4b5bbf205bc3232b14ec91d776b3937f61c97..7712d7c70e5ebb12c2d9ab281f66995f871d8afc 100644 (file)
@@ -35,7 +35,15 @@ class IoTDMItCommunication(IotComm):
 
     __blocking_call_timeout = 3  # seconds
 
-    def __init__(self, tx, rx, entity_id, protocol, protocol_params, auto_handling_descriptions={}):
+    def __init__(
+        self,
+        tx,
+        rx,
+        entity_id,
+        protocol,
+        protocol_params,
+        auto_handling_descriptions={},
+    ):
         super(IoTDMItCommunication, self).__init__()
         self.tx = tx
         self.rx = rx
@@ -53,12 +61,21 @@ class IoTDMItCommunication(IotComm):
         Creates and returns response to provided notification request with
         provided result code
         """
-        builder = IoTDMJsonPrimitiveBuilder() \
-            .set_communication_protocol(self.get_protocol()) \
-            .set_param(OneM2M.short_request_identifier,
-                       notification_request_primitive.get_param(OneM2M.short_request_identifier)) \
-            .set_param(OneM2M.short_response_status_code, onem2m_result_code) \
-            .set_proto_param(onem2m_http.http_result_code, onem2m_http.onem2m_to_http_result_codes[onem2m_result_code])
+        builder = (
+            IoTDMJsonPrimitiveBuilder()
+            .set_communication_protocol(self.get_protocol())
+            .set_param(
+                OneM2M.short_request_identifier,
+                notification_request_primitive.get_param(
+                    OneM2M.short_request_identifier
+                ),
+            )
+            .set_param(OneM2M.short_response_status_code, onem2m_result_code)
+            .set_proto_param(
+                onem2m_http.http_result_code,
+                onem2m_http.onem2m_to_http_result_codes[onem2m_result_code],
+            )
+        )
         return builder.build()
 
     def add_auto_reply_description(self, auto_reply_description):
@@ -70,9 +87,13 @@ class IoTDMItCommunication(IotComm):
             raise RuntimeError("Invalid automatic handling description object passed")
 
         if auto_reply_description in self.auto_handling_descriptions:
-            raise RuntimeError("Attempt to insert the same auto handling description multiple times")
+            raise RuntimeError(
+                "Attempt to insert the same auto handling description multiple times"
+            )
 
-        self.auto_handling_descriptions[auto_reply_description] = AutoHandlingStatistics()
+        self.auto_handling_descriptions[
+            auto_reply_description
+        ] = AutoHandlingStatistics()
 
     def remove_auto_reply_description(self, auto_reply_description):
         """Removes description of automatic reply"""
@@ -82,7 +103,7 @@ class IoTDMItCommunication(IotComm):
         if auto_reply_description not in self.auto_handling_descriptions:
             raise RuntimeError("No such auto handling description")
 
-        del(self.auto_handling_descriptions[auto_reply_description])
+        del self.auto_handling_descriptions[auto_reply_description]
 
     def get_auto_handling_statistics(self, auto_criteria):
         """
@@ -113,7 +134,9 @@ class IoTDMItCommunication(IotComm):
         # Use auto handling if match criteria
         for auto_response_desc, statistics in self.auto_handling_descriptions.items():
             if auto_response_desc.match(request_primitive):
-                response = self.create_auto_response(request_primitive, auto_response_desc.get_result_code())
+                response = self.create_auto_response(
+                    request_primitive, auto_response_desc.get_result_code()
+                )
                 # this request was successfully handled automatically,
                 # increment statistics and return the resulting response
                 statistics.counter += 1
@@ -141,7 +164,7 @@ class IoTDMItCommunication(IotComm):
         """Returns default primitive parameters"""
         params = {
             OneM2M.short_from: self.entity_id,
-            OneM2M.short_request_identifier: str(self.get_next_request_id())
+            OneM2M.short_request_identifier: str(self.get_next_request_id()),
         }
         return params
 
@@ -165,8 +188,11 @@ class IoTDMItCommunication(IotComm):
             self.rx_request_queue = None
             self.rx_response_queue = None
             if req_size or rsp_size:
-                raise RuntimeError("No all requests: {} or responses: {} were processed".format(
-                                   req_size, rsp_size))
+                raise RuntimeError(
+                    "No all requests: {} or responses: {} were processed".format(
+                        req_size, rsp_size
+                    )
+                )
 
     def get_next_request_id(self):
         """Returns unique request ID"""
@@ -208,7 +234,9 @@ class IoTDMItCommunicationFactory(object):
     IoTDMItCommunication class
     """
 
-    def create_http_json_primitive_communication(self, entity_id, protocol, protocol_params, rx_port, rx_interface=""):
+    def create_http_json_primitive_communication(
+        self, entity_id, protocol, protocol_params, rx_port, rx_interface=""
+    ):
         """
         Instantiates encoder/decoder and rx/tx objects required by
         IoTDMItCommunication and returns new instance of the
@@ -227,11 +255,15 @@ class IoTDMItCommunicationFactory(object):
                 encoder_rx = OneM2MHttpJsonEncoderRx()
                 decoder_rx = OneM2MHttpJsonDecoderRx()
 
-                rx = OneM2MHttpRx(decoder_rx, encoder_rx, port=rx_port, interface=rx_interface)
+                rx = OneM2MHttpRx(
+                    decoder_rx, encoder_rx, port=rx_port, interface=rx_interface
+                )
 
             return IoTDMItCommunication(tx, rx, entity_id, protocol, protocol_params)
 
-        raise RuntimeError("Unsupported communication protocol specified: {}".format(protocol))
+        raise RuntimeError(
+            "Unsupported communication protocol specified: {}".format(protocol)
+        )
 
 
 class IoTDMJsonPrimitiveBuilder(OneM2MJsonPrimitiveBuilder):
@@ -240,24 +272,29 @@ class IoTDMJsonPrimitiveBuilder(OneM2MJsonPrimitiveBuilder):
     builder classes of all supported protocols
     """
 
-    IoTDMProtoPrimitiveClasses = {
-        "http": OneM2MHttpJsonPrimitive
-    }
+    IoTDMProtoPrimitiveClasses = {"http": OneM2MHttpJsonPrimitive}
 
     def build(self):
         if not self.protocol or self.protocol not in self.IoTDMProtoPrimitiveClasses:
             return super(IoTDMJsonPrimitiveBuilder, self).build()
 
         primitive_class = self.IoTDMProtoPrimitiveClasses[self.protocol]
-        return primitive_class(self.parameters, self.content, self.protocol,
-                               self.proto_params)
+        return primitive_class(
+            self.parameters, self.content, self.protocol, self.proto_params
+        )
 
 
 class RequestAutoHandlingDescription(object):
     """Class stores auto handling matching criteria for request primitives"""
 
-    def __init__(self, parameters_match_dict, content_match_dict, proto_param_match_dict,
-                 onem2m_result_code, matching_cb=None):
+    def __init__(
+        self,
+        parameters_match_dict,
+        content_match_dict,
+        proto_param_match_dict,
+        onem2m_result_code,
+        matching_cb=None,
+    ):
         self.onem2m_result_code = onem2m_result_code
 
         self.parameters_match_dict = parameters_match_dict
@@ -335,7 +372,9 @@ class RequestAutoHandlingDescriptionBuilder(object):
 
     def _add_critieria(self, json_pointer, value, match_dict):
         if str(json_pointer) in match_dict:
-            raise RuntimeError("JSON pointer: {} already added".format(str(json_pointer)))
+            raise RuntimeError(
+                "JSON pointer: {} already added".format(str(json_pointer))
+            )
         match_dict[json_pointer] = value
 
     def add_param_criteria(self, json_pointer, value):
@@ -358,7 +397,9 @@ class RequestAutoHandlingDescriptionBuilder(object):
         if None is self.onem2m_result_code:
             raise RuntimeError("Result code not set")
 
-        return RequestAutoHandlingDescription(self.parameter_match_dict,
-                                              self.content_match_dict,
-                                              self.proto_param_match_dict,
-                                              self.onem2m_result_code)
+        return RequestAutoHandlingDescription(
+            self.parameter_match_dict,
+            self.content_match_dict,
+            self.proto_param_match_dict,
+            self.onem2m_result_code,
+        )
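
A sketch of composing an automatic-reply rule with the builder above; the JSON
pointer and codes are illustrative, and because no result-code setter appears
in this hunk, the attribute that build() checks is assigned directly (an
assumption about the builder's internals):

    builder = RequestAutoHandlingDescriptionBuilder()
    builder.add_param_criteria("/op", OneM2M.operation_notify)
    # Assumption: set the attribute that build() validates.
    builder.onem2m_result_code = OneM2M.result_code_ok
    description = builder.build()
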
index 9f3f94abe35a700a185ea6864ee393c02171ec61..91c712eaafc1a3dd0b72a7f9f53b2ad14631004a 100644 (file)
@@ -47,7 +47,7 @@ http_result_code = "Result-Code"
 http_specific_headers = [
     http_header_content_type.lower(),
     http_header_content_location.lower(),
-    http_header_content_length.lower()
+    http_header_content_length.lower(),
 ]
 
 http_header_origin = "X-M2M-Origin"
@@ -63,15 +63,17 @@ http_header_rsc = "X-M2M-RSC"
 http_header_ati = "X-M2M-ATI"
 
 # TODO add missing element mappings
-http_headers = OneM2MEncodeDecodeData("HTTPHeaders")\
-    .add(http_header_content_type, http_header_content_type)\
-    .add(http_header_content_location, http_header_content_location)\
-    .add(http_header_content_length, http_header_content_length)\
-    .add(OneM2M.short_from, http_header_origin)\
-    .add(OneM2M.short_request_identifier, http_header_ri)\
-    .add(OneM2M.short_group_request_identifier, http_header_gid)\
-    .add(OneM2M.short_originating_timestamp, http_header_ot)\
+http_headers = (
+    OneM2MEncodeDecodeData("HTTPHeaders")
+    .add(http_header_content_type, http_header_content_type)
+    .add(http_header_content_location, http_header_content_location)
+    .add(http_header_content_length, http_header_content_length)
+    .add(OneM2M.short_from, http_header_origin)
+    .add(OneM2M.short_request_identifier, http_header_ri)
+    .add(OneM2M.short_group_request_identifier, http_header_gid)
+    .add(OneM2M.short_originating_timestamp, http_header_ot)
     .add(OneM2M.short_response_status_code, http_header_rsc)
+)
 
 http_query_params = [
     OneM2M.short_resource_type,
@@ -88,12 +90,10 @@ http_query_params = [
 
 onem2m_to_http_result_codes = {
     OneM2M.result_code_accepted: httplib.ACCEPTED,
-
     OneM2M.result_code_ok: httplib.OK,
     OneM2M.result_code_created: httplib.CREATED,
     OneM2M.result_code_deleted: httplib.OK,
     OneM2M.result_code_updated: httplib.OK,
-
     OneM2M.result_code_bad_request: httplib.BAD_REQUEST,
     OneM2M.result_code_not_found: httplib.NOT_FOUND,
     OneM2M.result_code_operation_not_allowed: httplib.METHOD_NOT_ALLOWED,
@@ -113,7 +113,6 @@ onem2m_to_http_result_codes = {
     OneM2M.result_code_esprim_unknown_orig_rand_id: httplib.FORBIDDEN,
     OneM2M.result_code_esprim_unknown_recv_rand_id: httplib.FORBIDDEN,
     OneM2M.result_code_esprim_bad_mac: httplib.FORBIDDEN,
-
     OneM2M.result_code_internal_server_error: httplib.INTERNAL_SERVER_ERROR,
     OneM2M.result_code_not_implemened: httplib.NOT_IMPLEMENTED,
     OneM2M.result_code_target_not_reachable: httplib.NOT_FOUND,
@@ -129,7 +128,6 @@ onem2m_to_http_result_codes = {
     OneM2M.result_code_esprim_decryption_error: httplib.INTERNAL_SERVER_ERROR,
     OneM2M.result_code_esprim_encryption_error: httplib.INTERNAL_SERVER_ERROR,
     OneM2M.result_code_sparql_update_error: httplib.INTERNAL_SERVER_ERROR,
-
     OneM2M.result_code_external_object_not_reachable: httplib.NOT_FOUND,
     OneM2M.result_code_external_object_not_found: httplib.NOT_FOUND,
     OneM2M.result_code_max_number_of_member_exceeded: httplib.BAD_REQUEST,
@@ -142,7 +140,7 @@ onem2m_to_http_result_codes = {
     OneM2M.result_code_mgmt_conversion_error: httplib.INTERNAL_SERVER_ERROR,
     OneM2M.result_code_mgmt_cancellation_failed: httplib.INTERNAL_SERVER_ERROR,
     OneM2M.result_code_already_complete: httplib.BAD_REQUEST,
-    OneM2M.result_code_mgmt_command_not_cancellable: httplib.BAD_REQUEST
+    OneM2M.result_code_mgmt_command_not_cancellable: httplib.BAD_REQUEST,
 }
 
 
@@ -203,7 +201,9 @@ class OneM2MHttpRx(IoTRx):
         if not rsp_primitive:
             code = httplib.INTERNAL_SERVER_ERROR
             reason = status_codes._codes[code]
-            start_line = httputil.ResponseStartLine(version='HTTP/1.1', code=code, reason=reason)
+            start_line = httputil.ResponseStartLine(
+                version="HTTP/1.1", code=code, reason=reason
+            )
             request.connection.write_headers(start_line, httputil.HTTPHeaders())
             request.finish()
             return
@@ -216,7 +216,9 @@ class OneM2MHttpRx(IoTRx):
         code = encoded.status_code
         reason = encoded.reason
 
-        start_line = httputil.ResponseStartLine(version='HTTP/1.1', code=code, reason=reason)
+        start_line = httputil.ResponseStartLine(
+            version="HTTP/1.1", code=code, reason=reason
+        )
         request.connection.write_headers(start_line, headers)
 
         # set content
@@ -303,7 +305,7 @@ class OneM2MHttpJsonEncoderTx(IoTDataEncoder):
         OneM2M.operation_retrieve: "get",
         OneM2M.operation_update: "put",
         OneM2M.operation_delete: "delete",
-        OneM2M.operation_notify: "post"
+        OneM2M.operation_notify: "post",
     }
 
     def _encode_operation(self, onem2m_operation):
@@ -341,7 +343,7 @@ class OneM2MHttpJsonEncoderTx(IoTDataEncoder):
                     if protocol_address in proto_params:
                         entity_address = proto_params[protocol_address]
                         if protocol_port in proto_params:
-                            entity_address += (":" + str(proto_params[protocol_port]))
+                            entity_address += ":" + str(proto_params[protocol_port])
 
                 msg.url = "http://" + entity_address + resource_uri
 
@@ -351,7 +353,7 @@ class OneM2MHttpJsonEncoderTx(IoTDataEncoder):
 
                 # Query parameters
                 if msg.url and key in http_query_params:
-                    msg.url += (delimiter + key + "=" + str(value))
+                    msg.url += delimiter + key + "=" + str(value)
                     delimiter = "&"
                     continue
 
@@ -417,7 +419,11 @@ class OneM2MHttpDecodeUtils:
                         try:
                             int(value)
                         except Exception as e:
-                            raise IoTDataDecodeError("Invalid Content-Length value: {}, error: {}".format(value, e))
+                            raise IoTDataDecodeError(
+                                "Invalid Content-Length value: {}, error: {}".format(
+                                    value, e
+                                )
+                            )
 
                     http_specifics[decoded_name] = value
                 else:
@@ -426,7 +432,11 @@ class OneM2MHttpDecodeUtils:
                         try:
                             value = int(value)
                         except Exception as e:
-                            raise IoTDataDecodeError("Invalid status code value: {}, error: {}".format(value, e))
+                            raise IoTDataDecodeError(
+                                "Invalid status code value: {}, error: {}".format(
+                                    value, e
+                                )
+                            )
 
                     primitive_param_dict[decoded_name] = value
 
@@ -442,18 +452,24 @@ class OneM2MHttpJsonDecoderRx(IoTDataDecoder):
         Decodes Tx specific HTTP message with JSON content type to OneM2M JSON
         primitive object
         """
-        builder = OneM2MHttpJsonPrimitiveBuilder() \
-            .set_communication_protocol(HTTPPROTOCOLNAME)
+        builder = OneM2MHttpJsonPrimitiveBuilder().set_communication_protocol(
+            HTTPPROTOCOLNAME
+        )
 
         primitive_param_dict = {}
         http_specifics = {}
-        OneM2MHttpDecodeUtils.decode_headers(primitive_param_dict, http_specifics, protocol_message.headers)
+        OneM2MHttpDecodeUtils.decode_headers(
+            primitive_param_dict, http_specifics, protocol_message.headers
+        )
 
         builder.set_parameters(primitive_param_dict)
         builder.set_protocol_specific_parameters(http_specifics)
 
         if protocol_message.path:
-            builder.set_param(OneM2M.short_to, OneM2MHttpDecodeUtils.translate_uri_to_onem2m(protocol_message.path))
+            builder.set_param(
+                OneM2M.short_to,
+                OneM2MHttpDecodeUtils.translate_uri_to_onem2m(protocol_message.path),
+            )
 
         if protocol_message.body:
             builder.set_content(protocol_message.body)
@@ -466,7 +482,8 @@ class OneM2MHttpJsonDecoderRx(IoTDataDecoder):
 
         if protocol_message.method:
             operation = OneM2MHttpDecodeUtils.translate_http_method_to_onem2m_operation(
-                protocol_message.method, builder.has_param(OneM2M.short_resource_type))
+                protocol_message.method, builder.has_param(OneM2M.short_resource_type)
+            )
             builder.set_param(OneM2M.short_operation, operation)
 
         return builder.build()
@@ -483,12 +500,15 @@ class OneM2MHttpJsonDecoderTx(IoTDataDecoder):
         Decodes Rx specific HTTP message with JSON content type to OneM2M JSON
         primitive object
         """
-        builder = OneM2MHttpJsonPrimitiveBuilder() \
-            .set_communication_protocol(HTTPPROTOCOLNAME)
+        builder = OneM2MHttpJsonPrimitiveBuilder().set_communication_protocol(
+            HTTPPROTOCOLNAME
+        )
 
         primitive_param_dict = {}
         http_specifics = {}
-        OneM2MHttpDecodeUtils.decode_headers(primitive_param_dict, http_specifics, protocol_message.headers)
+        OneM2MHttpDecodeUtils.decode_headers(
+            primitive_param_dict, http_specifics, protocol_message.headers
+        )
 
         # TODO decode query if needed
 
@@ -527,7 +547,9 @@ class OneM2MHttpJsonPrimitive(OneM2MJsonPrimitive):
 
         # TODO add support for other content types if needed
         if "json" not in content_type:
-            raise AssertionError("HTTP primitive with unsupported Content-Type: {}".format(content_type))
+            raise AssertionError(
+                "HTTP primitive with unsupported Content-Type: {}".format(content_type)
+            )
 
         content_length = primitive.get_proto_param(http_header_content_length)
         if not content_length:
@@ -536,7 +558,9 @@ class OneM2MHttpJsonPrimitive(OneM2MJsonPrimitive):
         if not isinstance(content_length, basestring):
             raise AssertionError(
                 "HTTP primitive with Content-Length value of invalid data type: {}, string is expected".format(
-                    content_length.__class__))
+                    content_length.__class__
+                )
+            )
 
         # verify length of content if exists
         # TODO commented out because this fails for primitives built by builder
@@ -552,7 +576,9 @@ class OneM2MHttpJsonPrimitive(OneM2MJsonPrimitive):
         return op, rqi
 
     def _check_response_common(self, response_primitive, rqi=None, rsc=None):
-        response_rsc = super(OneM2MHttpJsonPrimitive, self)._check_response_common(response_primitive, rqi, rsc)
+        response_rsc = super(OneM2MHttpJsonPrimitive, self)._check_response_common(
+            response_primitive, rqi, rsc
+        )
         self._check_http_primitive_content(response_primitive)
 
         http_res = response_primitive.get_proto_param(http_result_code)
@@ -562,28 +588,39 @@ class OneM2MHttpJsonPrimitive(OneM2MJsonPrimitive):
         if not isinstance(http_res, int):
             raise AssertionError(
                 "HTTP response primitive with Result-Code value of invalid data type: {}, expected is integer".format(
-                    http_res.__class__))
+                    http_res.__class__
+                )
+            )
 
         try:
             expected_http_res = onem2m_to_http_result_codes[response_rsc]
         except KeyError as e:
-            raise RuntimeError("Failed to map OneM2M rsc ({}) to HTTP status code: {}".format(response_rsc, e))
+            raise RuntimeError(
+                "Failed to map OneM2M rsc ({}) to HTTP status code: {}".format(
+                    response_rsc, e
+                )
+            )
 
         if expected_http_res != http_res:
             raise AssertionError(
                 "Incorrect HTTP status code mapped to OneM2M status code {}, http: {}, expected http: {}".format(
-                    response_rsc, http_res, expected_http_res))
+                    response_rsc, http_res, expected_http_res
+                )
+            )
 
         # Content-Location
         if response_rsc == OneM2M.result_code_created:
-            content_location = response_primitive.get_proto_param(http_header_content_location)
+            content_location = response_primitive.get_proto_param(
+                http_header_content_location
+            )
             if not content_location:
                 raise AssertionError("HTTP response primitive without Content-Location")
 
             if not isinstance(content_location, basestring):
                 raise AssertionError(
-                    "HTTP response primitive with invalid Content-Location value data type: {}, " +
-                    "string is expected".format(content_location.__class__))
+                    "HTTP response primitive with invalid Content-Location value data type: {}, "
+                    "string is expected".format(content_location.__class__)
+                )
 
         return response_rsc
 
@@ -592,4 +629,6 @@ class OneM2MHttpJsonPrimitiveBuilder(OneM2MJsonPrimitiveBuilder):
     """Builder class specialized for OneM2MHttpJsonPrimitive objects"""
 
     def build(self):
-        return OneM2MHttpJsonPrimitive(self.parameters, self.content, self.protocol, self.proto_params)
+        return OneM2MHttpJsonPrimitive(
+            self.parameters, self.content, self.protocol, self.proto_params
+        )
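
The status cross-check in _check_response_common above reduces to a table lookup; a self-contained sketch under assumed values (the two pairs shown follow the common OneM2M-to-HTTP convention, but the authoritative table is this module's onem2m_to_http_result_codes):

    # rsc -> HTTP status; illustrative two-entry subset (assumed values)
    onem2m_to_http_result_codes = {2000: 200, 2001: 201}

    def check_status(response_rsc, http_res):
        expected_http_res = onem2m_to_http_result_codes[response_rsc]
        if expected_http_res != http_res:
            raise AssertionError(
                "Incorrect HTTP status code mapped to OneM2M status code {}, "
                "http: {}, expected http: {}".format(
                    response_rsc, http_res, expected_http_res
                )
            )

    check_status(2001, 201)  # a created resource answered with HTTP 201: passes
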
index 0bc9cbb27fc73b13fb6dbca9046426a57034b751..c963936feea186d89a845d7bc466bbe9b5a99762 100644 (file)
@@ -32,8 +32,9 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
         XML short scheme, XML long scheme
     """
 
-    def __init__(self, parameters, content,
-                 protocol_name, protocol_parameters, short_scheme=True):
+    def __init__(
+        self, parameters, content, protocol_name, protocol_parameters, short_scheme=True
+    ):
         self.parameters = parameters
         self.content = content
         self.protocol = protocol_name
@@ -50,16 +51,20 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
         try:
             json_pointer = str(pointer_string)
             # add leading slash if missing
-            if json_pointer[0] != '/':
-                json_pointer = '/' + json_pointer
+            if json_pointer[0] != "/":
+                json_pointer = "/" + json_pointer
 
             # remove slash from the end if exists
-            if json_pointer[-1] == '/':
+            if json_pointer[-1] == "/":
                 json_pointer = json_pointer[:-1]
 
             json_pointer = JsonPointer(json_pointer)
         except Exception as e:
-            raise RuntimeError("Invalid JSON pointer passed: {}, error: {}".format(pointer_string, e.message))
+            raise RuntimeError(
+                "Invalid JSON pointer passed: {}, error: {}".format(
+                    pointer_string, e.message
+                )
+            )
         return json_pointer
 
     def _get_item_by_pointer(self, data_dict, pointer):
@@ -74,7 +79,11 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
         try:
             item = json_pointer.resolve(data_dict)
         except JsonPointerException as e:
-            raise RuntimeError("Failed to get JSON item by JSON pointer: {}, error: {}".format(pointer, e.message))
+            raise RuntimeError(
+                "Failed to get JSON item by JSON pointer: {}, error: {}".format(
+                    pointer, e.message
+                )
+            )
 
         return item
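
Both helpers above wrap the jsonpointer package; a runnable sketch of the calls involved (JsonPointer, resolve and JsonPointerException are that package's actual API; the sample document is an assumption):

    from jsonpointer import JsonPointer, JsonPointerException

    doc = {"m2m:cnt": {"rn": "Container1"}}
    print(JsonPointer("/m2m:cnt/rn").resolve(doc))  # Container1
    try:
        JsonPointer("/m2m:cnt/missing").resolve(doc)
    except JsonPointerException as e:
        print("Failed to get JSON item by JSON pointer: {}".format(e))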
 
@@ -160,10 +169,16 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
     def _check_exchange_protocols(self, response_primitive):
         self._check_protocol_of_request()
         self._check_protocol_of_response(response_primitive)
-        if not self.get_communication_protocol() == response_primitive.get_communication_protocol():
-            raise AssertionError("Request {} and response {} primitives' communication protocols doesn't match.".
-                                 format(self.get_communication_protocol(),
-                                        response_primitive.get_communication_protocol()))
+        if (
+            self.get_communication_protocol()
+            != response_primitive.get_communication_protocol()
+        ):
+            raise AssertionError(
+                "Request {} and response {} primitives' communication protocols doesn't match.".format(
+                    self.get_communication_protocol(),
+                    response_primitive.get_communication_protocol(),
+                )
+            )
 
     def _check_request_common(self):
         op = self.get_param(OneM2M.short_operation)
@@ -171,18 +186,27 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
             raise AssertionError("Request primitive without operation set")
 
         if not isinstance(op, int):
-            raise AssertionError("Invalid data type ({}) of operation where integer is expected".format(op.__class__))
+            raise AssertionError(
+                "Invalid data type ({}) of operation where integer is expected".format(
+                    op.__class__
+                )
+            )
 
         if op not in OneM2M.operation_valid_values:
-            raise AssertionError("Request primitive with unknown operation set: {}".format(op))
+            raise AssertionError(
+                "Request primitive with unknown operation set: {}".format(op)
+            )
 
         rqi = self.get_param(OneM2M.short_request_identifier)
         if not rqi:
             raise AssertionError("Request primitive without request id")
 
         if not isinstance(rqi, basestring):
-            raise AssertionError("Invalid data type ({}) of request identifier where string is expected".
-                                 format(rqi.__class__))
+            raise AssertionError(
+                "Invalid data type ({}) of request identifier where string is expected".format(
+                    rqi.__class__
+                )
+            )
         return op, rqi
 
     def _check_response_common(self, response_primitive, rqi=None, rsc=None):
@@ -191,26 +215,38 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
             raise AssertionError("Response primitive without request id")
 
         if not isinstance(rsp_rqi, basestring):
-            raise AssertionError("Invalid data type ({}) of request identifier where string is expected".
-                                 format(rsp_rqi.__class__))
+            raise AssertionError(
+                "Invalid data type ({}) of request identifier where string is expected".format(
+                    rsp_rqi.__class__
+                )
+            )
 
         if rqi and rqi != rsp_rqi:
-            raise AssertionError("Request IDs mismatch: req: {}, rsp: {}".format(rqi, rsp_rqi))
+            raise AssertionError(
+                "Request IDs mismatch: req: {}, rsp: {}".format(rqi, rsp_rqi)
+            )
 
         r_rsc = response_primitive.get_param(OneM2M.short_response_status_code)
         if not r_rsc:
             raise AssertionError("Response primitive without status code")
 
         if not isinstance(r_rsc, int):
-            raise AssertionError("Invalid data type ({}) of response status code where integer is expected".
-                                 format(r_rsc.__class__))
+            raise AssertionError(
+                "Invalid data type ({}) of response status code where integer is expected".format(
+                    r_rsc.__class__
+                )
+            )
 
         if r_rsc not in OneM2M.supported_result_codes:
-            raise AssertionError("Unsupported response primitive result code: {}".format(r_rsc))
+            raise AssertionError(
+                "Unsupported response primitive result code: {}".format(r_rsc)
+            )
 
         if rsc is not None:
             if r_rsc != rsc:
-                raise AssertionError("Unexpected result code: {}, expected: {}".format(r_rsc, rsc))
+                raise AssertionError(
+                    "Unexpected result code: {}, expected: {}".format(r_rsc, rsc)
+                )
 
         return r_rsc
 
@@ -220,17 +256,24 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
         r_rsc = self._check_response_common(response_primitive, rqi, rsc)
         return op, r_rsc
 
-    def _check_response_positive_result(self, response_rsc=None, request_operation=None):
+    def _check_response_positive_result(
+        self, response_rsc=None, request_operation=None
+    ):
         if response_rsc and response_rsc not in OneM2M.positive_result_codes:
-            raise AssertionError("Response with negative status code: {}".format(response_rsc))
+            raise AssertionError(
+                "Response with negative status code: {}".format(response_rsc)
+            )
 
         if request_operation is None:
             return
 
         expected_rsc = OneM2M.expected_result_codes[request_operation]
         if expected_rsc != response_rsc:
-            raise AssertionError("Unexpected positive result code for operation: {}, received: {}, expected: {}".format(
-                                 request_operation, response_rsc, expected_rsc))
+            raise AssertionError(
+                "Unexpected positive result code for operation: {}, received: {}, expected: {}".format(
+                    request_operation, response_rsc, expected_rsc
+                )
+            )
 
     def check_exchange(self, response_primitive, rsc=None):
         op, r_rsc = self._check_exchange_common(response_primitive, rsc)
@@ -245,16 +288,25 @@ class OneM2MJsonPrimitive(OneM2MPrimitive):
 
         msg = response_primitive.get_attr(OneM2M.error_message_item)
         if not msg:
-            raise AssertionError("Negative response primitive without error message, expected message: {}".format(
-                                 error_message))
+            raise AssertionError(
+                "Negative response primitive without error message, expected message: {}".format(
+                    error_message
+                )
+            )
 
         if not isinstance(msg, basestring):
-            raise AssertionError("Invalid data type ({}) of response error message where string is expected".
-                                 format(msg.__class__))
+            raise AssertionError(
+                "Invalid data type ({}) of response error message where string is expected".format(
+                    msg.__class__
+                )
+            )
 
         if not msg == error_message:
-            raise AssertionError("Negative response with unexpected error message: {}, expected: {}".format(
-                                 msg, error_message))
+            raise AssertionError(
+                "Negative response with unexpected error message: {}, expected: {}".format(
+                    msg, error_message
+                )
+            )
 
     def check_exchange_negative(self, response_primitive, rsc, error_message=None):
         op, r_rsc = self._check_exchange_common(response_primitive, rsc)
@@ -357,7 +409,6 @@ class OneM2MJsonPrimitiveBuilder(OneM2MPrimitiveBuilder, OneM2MJsonPrimitive):
         raise NotImplementedError()
 
     def build(self):
-        return OneM2MJsonPrimitive(self.parameters,
-                                   self.content,
-                                   self.protocol,
-                                   self.proto_params)
+        return OneM2MJsonPrimitive(
+            self.parameters, self.content, self.protocol, self.proto_params
+        )
index f195089d7ebd95dc76ac81229915fb63f6632778..aab0426c79d714296046492abb2d648e3b617475 100644 (file)
@@ -33,7 +33,7 @@ class OneM2MPrimitiveDefinitions:
         operation_retrieve,
         operation_update,
         operation_delete,
-        operation_notify
+        operation_notify,
     ]
 
     # Long naming schema definitions
@@ -128,12 +128,10 @@ class OneM2MPrimitiveDefinitions:
 
     supported_result_codes = [
         result_code_accepted,
-
         result_code_ok,
         result_code_created,
         result_code_deleted,
         result_code_updated,
-
         result_code_bad_request,
         result_code_not_found,
         result_code_operation_not_allowed,
@@ -153,7 +151,6 @@ class OneM2MPrimitiveDefinitions:
         result_code_esprim_unknown_orig_rand_id,
         result_code_esprim_unknown_recv_rand_id,
         result_code_esprim_bad_mac,
-
         result_code_internal_server_error,
         result_code_not_implemened,
         result_code_target_not_reachable,
@@ -169,7 +166,6 @@ class OneM2MPrimitiveDefinitions:
         result_code_esprim_decryption_error,
         result_code_esprim_encryption_error,
         result_code_sparql_update_error,
-
         result_code_external_object_not_reachable,
         result_code_external_object_not_found,
         result_code_max_number_of_member_exceeded,
@@ -182,7 +178,7 @@ class OneM2MPrimitiveDefinitions:
         result_code_mgmt_conversion_error,
         result_code_mgmt_cancellation_failed,
         result_code_already_complete,
-        result_code_mgmt_command_not_cancellable
+        result_code_mgmt_command_not_cancellable,
     ]
 
     positive_result_codes = [
@@ -190,7 +186,7 @@ class OneM2MPrimitiveDefinitions:
         result_code_deleted,
         result_code_updated,
         result_code_created,
-        result_code_accepted
+        result_code_accepted,
     ]
 
     # Expected positive result codes per operation
@@ -199,7 +195,7 @@ class OneM2MPrimitiveDefinitions:
         operation_retrieve: result_code_ok,
         operation_update: result_code_updated,
         operation_delete: result_code_deleted,
-        operation_notify: result_code_ok
+        operation_notify: result_code_ok,
     }
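
The table above drives _check_response_positive_result later in this file; a sketch under assumed numerics (OneM2M commonly encodes create as operation 1 and CREATED as result code 2001, but the authoritative constants are the ones defined above):

    # operation -> expected rsc; create -> CREATED, retrieve -> OK (assumed values)
    expected_result_codes = {1: 2001, 2: 2000}

    def check_positive(request_operation, response_rsc):
        expected_rsc = expected_result_codes[request_operation]
        if expected_rsc != response_rsc:
            raise AssertionError(
                "Unexpected positive result code for operation: {}, received: {}, "
                "expected: {}".format(request_operation, response_rsc, expected_rsc)
            )

    check_positive(1, 2001)  # create answered with CREATED: passes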
 
     # Error message content item
@@ -266,30 +262,44 @@ class OneM2MEncodeDecodeData(object):
             raise Exception("No data type string specified")
 
         self.data_type = data_type  # name of data type
-        self._encode = {}   # dictionary stores OneM2M: protocol mapping
-        self._decode = {}   # dictionary stores protocol: OneM2M mapping
-        self._encode_ci = {}    # stores case insensitive OneM2M: protocol mapping
-        self._decode_ci = {}    # stores case insensitive protocol: OneM2M mapping
+        self._encode = {}  # dictionary stores OneM2M: protocol mapping
+        self._decode = {}  # dictionary stores protocol: OneM2M mapping
+        self._encode_ci = {}  # stores case insensitive OneM2M: protocol mapping
+        self._decode_ci = {}  # stores case insensitive protocol: OneM2M mapping
 
     def add(self, onem2m, protocol_specific):
         """Adds new encoding/decoding pair"""
         if onem2m in self._encode:
-            raise Exception("Data type: {}, Encoding key {} already exists".format(self.data_type, onem2m))
+            raise Exception(
+                "Data type: {}, Encoding key {} already exists".format(
+                    self.data_type, onem2m
+                )
+            )
         self._encode[onem2m] = protocol_specific
         decoded_ci = onem2m if not isinstance(onem2m, basestring) else onem2m.lower()
         self._encode_ci[decoded_ci] = protocol_specific
 
         if protocol_specific in self._decode:
-            raise Exception("Data type: {}, Decoding key {} already exists".format(self.data_type, protocol_specific))
+            raise Exception(
+                "Data type: {}, Decoding key {} already exists".format(
+                    self.data_type, protocol_specific
+                )
+            )
         self._decode[protocol_specific] = onem2m
-        encoded_ci = protocol_specific if not isinstance(protocol_specific, basestring) else protocol_specific.lower()
+        encoded_ci = (
+            protocol_specific
+            if not isinstance(protocol_specific, basestring)
+            else protocol_specific.lower()
+        )
         self._decode_ci[encoded_ci] = onem2m
         return self
 
     def encode(self, key):
         """Returns key encoded to protocol specific form"""
         if key not in self._encode:
-            raise IoTDataEncodeError("Data type: {}, Encoding key {} not found".format(self.data_type, key))
+            raise IoTDataEncodeError(
+                "Data type: {}, Encoding key {} not found".format(self.data_type, key)
+            )
         return self._encode[key]
 
     def encode_default(self, key, default):
@@ -303,7 +313,10 @@ class OneM2MEncodeDecodeData(object):
         k = key if not isinstance(key, basestring) else key.lower()
         if k not in self._encode_ci:
             raise IoTDataEncodeError(
-                "Data type: {}, Case Insensitive Encoding key {} not found".format(self.data_type, key))
+                "Data type: {}, Case Insensitive Encoding key {} not found".format(
+                    self.data_type, key
+                )
+            )
         return self._encode_ci[k]
 
     def encode_default_ci(self, key, default):
@@ -319,7 +332,9 @@ class OneM2MEncodeDecodeData(object):
     def decode(self, key):
         """Decodes protocol specific key and returns decoded OneM2M string"""
         if key not in self._decode:
-            raise IoTDataDecodeError("Data type: {}, Decoding key {} not found".format(self.data_type, key))
+            raise IoTDataDecodeError(
+                "Data type: {}, Decoding key {} not found".format(self.data_type, key)
+            )
         return self._decode[key]
 
     def decode_default(self, key, default):
@@ -336,7 +351,10 @@ class OneM2MEncodeDecodeData(object):
         k = key if not isinstance(key, basestring) else key.lower()
         if k not in self._decode_ci:
             raise IoTDataDecodeError(
-                "Data type: {}, Case Insensitive Decoding key {} not found".format(self.data_type, key))
+                "Data type: {}, Case Insensitive Decoding key {} not found".format(
+                    self.data_type, key
+                )
+            )
         return self._decode_ci[k]
 
     def decode_default_ci(self, key, default):
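
OneM2MEncodeDecodeData keeps four dictionaries so every pairing resolves in both directions, case-sensitively or not; a usage sketch (the class and its methods are from this file; the import path and the header pairings are assumptions for illustration):

    from onem2m_primitive import OneM2MEncodeDecodeData  # assumed module path

    headers = OneM2MEncodeDecodeData("HTTP headers")
    headers.add("rqi", "X-M2M-RI").add("fr", "X-M2M-Origin")  # add() chains

    print(headers.encode("rqi"))               # X-M2M-RI
    print(headers.decode("X-M2M-Origin"))      # fr
    print(headers.decode_ci("x-m2m-ri"))       # rqi (case-insensitive)
    print(headers.encode_default("op", None))  # None: missing key, no raise
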
@@ -428,6 +446,7 @@ class OneM2MPrimitive(IoTData):
 
 class OneM2MPrimitiveBuilderException(Exception):
     """OneM2M primitive build error"""
+
     pass
 
 
index 364fa452552f9436082b09e76acb206824a10a09..a1933ac50642c38a126e8c3af34050ac6854b596 100644 (file)
@@ -24,8 +24,11 @@ class TestOneM2MHttp(unittest.TestCase):
     """Class of unittests testing OneM2M HTTP communication and related classes"""
 
     params = {OneM2M.short_to: "InCSE2/Postman", "op": 2, "fr": "AE1", "rqi": 12345}
-    proto_params = {onem2m_http.protocol_address: "localhost", onem2m_http.protocol_port: 8282,
-                    "Content-Type": "application/json"}
+    proto_params = {
+        onem2m_http.protocol_address: "localhost",
+        onem2m_http.protocol_port: 8282,
+        "Content-Type": "application/json",
+    }
     content = {"content": 123}
 
     def test_primitive_encoding(self):
@@ -56,9 +59,19 @@ class TestOneM2MHttp(unittest.TestCase):
         return rsp_builder.build()
 
     def test_communicaton_send(self):
-        params = {OneM2M.short_to: "InCSE2/Postman", "op": 2, "fr": "AE1", "rqi": 12345, "rcn": 1, "ty": 4}
-        proto_params = {onem2m_http.protocol_address: "localhost", onem2m_http.protocol_port: 5000,
-                        "Content-Type": "application/json"}
+        params = {
+            OneM2M.short_to: "InCSE2/Postman",
+            "op": 2,
+            "fr": "AE1",
+            "rqi": 12345,
+            "rcn": 1,
+            "ty": 4,
+        }
+        proto_params = {
+            onem2m_http.protocol_address: "localhost",
+            onem2m_http.protocol_port: 5000,
+            "Content-Type": "application/json",
+        }
         content = {"content": 123}
 
         encoder = OneM2MHttpJsonEncoderTx()
index b5a0e517b1a366696526b328f17d48fff96e2912..0cbfa5e1ade225824f8b517756d2b189bca24ef2 100644 (file)
@@ -30,14 +30,15 @@ class TestOneM2MJsonPrimitive(unittest.TestCase):
         json_primitive = json.loads(primitive.get_primitive_str())
         self.assertNotIn(OneM2M.short_primitive_content, json_primitive)
 
-        self.assertEqual(json.dumps(json_primitive),
-                         primitive.get_parameters_str())
+        self.assertEqual(json.dumps(json_primitive), primitive.get_parameters_str())
 
     def _create_primitive(self):
-        builder = OneM2MJsonPrimitiveBuilder()\
-            .set_parameters(self.params)\
-            .set_content(self.content)\
+        builder = (
+            OneM2MJsonPrimitiveBuilder()
+            .set_parameters(self.params)
+            .set_content(self.content)
             .set_protocol_specific_parameters(self.proto_params)
+        )
         return builder.build()
 
     def test_primitive_build_with_content(self):
@@ -49,8 +50,10 @@ class TestOneM2MJsonPrimitive(unittest.TestCase):
         json_primitive = json.loads(primitive.get_primitive_str())
         self.assertIn(OneM2M.short_primitive_content, json_primitive)
 
-        self.assertEqual(json.dumps(json_primitive[OneM2M.short_primitive_content]),
-                         primitive.get_content_str())
+        self.assertEqual(
+            json.dumps(json_primitive[OneM2M.short_primitive_content]),
+            primitive.get_content_str(),
+        )
 
     def test_primitive_items_access(self):
         primitive = self._create_primitive()
index ad67a6376cd23aa5e77a643e71c2939ddac98416..680a42eca9da99d0fa4c822ba6d9f7435e55e0c2 100644 (file)
@@ -4,8 +4,7 @@ import ciotdm
 
 def connect_to_iotdm(host, user, password, prot="http"):
     """According to protocol, connect to iotdm."""
-    return ciotdm.connect(host, base="InCSE1", auth=(
-        user, password), protocol=prot)
+    return ciotdm.connect(host, base="InCSE1", auth=(user, password), protocol=prot)
 
 
 def modify_headers_origin(connection, new_origin):
@@ -20,11 +19,9 @@ def create_resource(connection, parent, restype, attribute=None):
     return connection.response
 
 
-def create_resource_with_command(connection, parent, restype,
-                                 command, attribute=None):
+def create_resource_with_command(connection, parent, restype, command, attribute=None):
     """According to command in the header, create the resource."""
-    connection.create_with_command(parent, restype,
-                                   command, attribute)
+    connection.create_with_command(parent, restype, command, attribute)
     check_response(connection.response, "create")
     return connection.response
 
@@ -32,9 +29,11 @@ def create_resource_with_command(connection, parent, restype,
 def create_subscription(connection, parent, ip, port):
     """Create subscription."""
     uri = "http://%s:%d" % (ip, int(port))
-    connection.create(parent, "subscription", {
-        "notificationURI": uri,
-        "notificationContentType": "wholeResource"})
+    connection.create(
+        parent,
+        "subscription",
+        {"notificationURI": uri, "notificationContentType": "wholeResource"},
+    )
     check_response(connection.response, "create")
     return connection.response
 
@@ -60,8 +59,7 @@ def update_resource(connection, resid, restype, attr):
     return connection.response
 
 
-def update_resource_with_command(connection, resid,
-                                 restype, command, attr):
+def update_resource_with_command(connection, resid, restype, command, attr):
     """According to command, update resource with resourceID."""
     connection.update_with_command(resid, restype, command, attr)
     check_response(connection.response, "update")
@@ -106,7 +104,7 @@ def name(response):
     """Return resourceName."""
     resource_name = ciotdm.name(response)
     if resource_name is None:
-        raise AssertionError('Cannot find this resource')
+        raise AssertionError("Cannot find this resource")
     return resource_name
 
 
@@ -167,22 +165,24 @@ def elapsed(response):
 
 def location(response):
     """Return response content-location."""
-    return response.headers['Content-Location']
+    return response.headers["Content-Location"]
 
 
 def kill_the_tree(host, cseid, username, password):
     """Delete the whole tree."""
-    connection = ciotdm.connect(host, base=cseid,
-                                auth=(username, password), protocol="http")
+    connection = ciotdm.connect(
+        host, base=cseid, auth=(username, password), protocol="http"
+    )
     connection.kill()
 
 
 def check_response(response, operation):
     """Check whether the connection is none."""
     if response is None:
-        raise AssertionError('Cannot %s this resource') % (operation)
-    elif hasattr(response, 'status_code'):
+        raise AssertionError("Cannot %s this resource") % (operation)
+    elif hasattr(response, "status_code"):
         if response.status_code < 200 or response.status_code > 299:
             raise AssertionError(
-                'Cannot %s this resource [%d] : %s' %
-                (operation, response.status_code, response.text))
+                "Cannot %s this resource [%d] : %s"
+                % (operation, response.status_code, response.text)
+            )
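
A minimal sketch of check_response against a failing response; the stub object is an assumption standing in for a requests.Response:

    class _StubResponse(object):  # assumed stand-in for requests.Response
        status_code = 404
        text = "not found"

    try:
        check_response(_StubResponse(), "retrieve")
    except AssertionError as e:
        print(e)  # Cannot retrieve this resource [404] : not found
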
index 43139b6f960b82aef85ae94be3c3e5adac5aadeb..a62ad7e7b1004ba8df232e38d68b8f2ecbf1eb8a 100644 (file)
@@ -11,7 +11,7 @@ op_provision = ":8181/restconf/operations/onem2m:onem2m-cse-provisioning"
 op_tree = ":8181/restconf/operational/onem2m:onem2m-resource-tree"
 op_cleanup = ":8181/restconf/operations/onem2m:onem2m-cleanup-store"
 
-cse_payload = '''
+cse_payload = """
 {    "input": {
         "onem2m-primitive": [
            {
@@ -25,18 +25,18 @@ cse_payload = '''
         ]
     }
 }
-'''
+"""
 
-application_payload = '''
+application_payload = """
 {
   any:
   [
     {"aei":"jb", "api":"jb", "apn":"jb2", "or":"http://hey/you" %s}
   ]
 }
-'''
+"""
 
-_container_payload = '''
+_container_payload = """
 {
   any:
   [
@@ -49,9 +49,9 @@ _container_payload = '''
     }
   ]
 }
-'''
+"""
 
-container_payload = '''
+container_payload = """
 {
   any:
   [
@@ -62,9 +62,9 @@ container_payload = '''
     }
   ]
 }
-'''
+"""
 
-contentInstance_payload = '''
+contentInstance_payload = """
 {
   "any": [
     {
@@ -74,7 +74,7 @@ contentInstance_payload = '''
     }
   ]
 }
-'''
+"""
 
 
 def which_payload(restype):
@@ -92,7 +92,7 @@ def which_payload(restype):
 def find_key(response, key):
     try:
         val = response.json()
-        return val['any'][0][key]
+        return val["any"][0][key]
     except Exception:
         return None
 
@@ -141,7 +141,7 @@ def headers(response):
 def error(response):
     """Return the error string in the response."""
     try:
-        return response.json()['error']
+        return response.json()["error"]
     except Exception:
         return None
 
@@ -174,28 +174,32 @@ def attr2str(attr):
 
 
 class connect:
-    def __init__(self, server="localhost", base='InCSE1',
-                 auth=('admin', 'admin'), protocol="http"):
+    def __init__(
+        self,
+        server="localhost",
+        base="InCSE1",
+        auth=("admin", "admin"),
+        protocol="http",
+    ):
         """Connect to a IoTDM server."""
         self.s = requests.Session()
         self.s.auth = auth
-        self.s.headers.update({'content-type': 'application/json'})
+        self.s.headers.update({"content-type": "application/json"})
         self.timeout = (5, 5)
         self.payload = cse_payload % (base)
         self.headers = {
             # Admittedly these are "magic values" but are required
             # and until a proper defaulting initializer is in place
             # are hard-coded.
-            'content-type': 'application/json',
-            'X-M2M-Origin': '//localhost:10000',
-            'X-M2M-RI': '12345',
-            'X-M2M-OT': 'NOW'
+            "content-type": "application/json",
+            "X-M2M-Origin": "//localhost:10000",
+            "X-M2M-RI": "12345",
+            "X-M2M-OT": "NOW",
         }
         self.server = "http://" + server
         if base is not None:
             self.url = self.server + op_provision
-            self.r = self.s.post(self.url,
-                                 data=self.payload, timeout=self.timeout)
+            self.r = self.s.post(self.url, data=self.payload, timeout=self.timeout)
 
     def create(self, parent, restype, name=None, attr=None):
         """Create resource."""
@@ -204,13 +208,14 @@ class connect:
         payload = which_payload(restype)
         payload = payload % (attr2str(attr))
         if name is None:
-            self.headers['X-M2M-NM'] = None
+            self.headers["X-M2M-NM"] = None
         else:
-            self.headers['X-M2M-NM'] = name
+            self.headers["X-M2M-NM"] = name
         parent = normalize(parent)
         self.url = self.server + ":8282/%s?ty=%s&rcn=1" % (parent, restype)
-        self.r = self.s.post(self.url, payload,
-                             timeout=self.timeout, headers=self.headers)
+        self.r = self.s.post(
+            self.url, payload, timeout=self.timeout, headers=self.headers
+        )
         return self.r
 
     def retrieve(self, id):
@@ -219,9 +224,8 @@ class connect:
             return None
         id = normalize(id)
         self.url = self.server + ":8282/%s?rcn=5&drt=2" % (id)
-        self.headers['X-M2M-NM'] = None
-        self.r = self.s.get(self.url, timeout=self.timeout,
-                            headers=self.headers)
+        self.headers["X-M2M-NM"] = None
+        self.r = self.s.get(self.url, timeout=self.timeout, headers=self.headers)
         return self.r
 
     def update(self, id, attr=None):
@@ -237,9 +241,8 @@ class connect:
             return None
         id = normalize(id)
         self.url = self.server + ":8282/%s" % (id)
-        self.headers['X-M2M-NM'] = None
-        self.r = self.s.delete(self.url, timeout=self.timeout,
-                               headers=self.headers)
+        self.headers["X-M2M-NM"] = None
+        self.r = self.s.delete(self.url, timeout=self.timeout, headers=self.headers)
         return self.r
 
     def tree(self):
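
The connect class above is a thin wrapper over a requests.Session; the equivalent raw calls, self-contained (requests is the library actually used here; host, port, resource type and payload values are assumptions):

    import requests

    s = requests.Session()
    s.auth = ("admin", "admin")
    headers = {
        "content-type": "application/json",
        "X-M2M-Origin": "//localhost:10000",
        "X-M2M-RI": "12345",
        "X-M2M-NM": "Container1",
    }
    # Mirrors connect.create(): POST to <server>:8282/<parent>?ty=<type>&rcn=1
    r = s.post(
        "http://localhost:8282/InCSE1?ty=3&rcn=1",  # ty=3: container (assumed)
        '{"any": [{"mni": 1, "mbs": 100}]}',
        timeout=(5, 5),
        headers=headers,
    )
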
index 500f23ef34db6dae99b269fb9b17d1d759f9a9cc..8241dd424bc2651246e9fb2290b7a2bd4b951acd 100644 (file)
@@ -35,45 +35,70 @@ def __get_session(allias, session):
     return None
 
 
-def prepare_primitive_builder_raw(protocol, primitive_params, content=None, proto_specific_params=None):
+def prepare_primitive_builder_raw(
+    protocol, primitive_params, content=None, proto_specific_params=None
+):
     """Creates primitive builder without any default data"""
-    builder = IoTDMJsonPrimitiveBuilder()\
-        .set_communication_protocol(protocol)\
-        .set_content(content)\
-        .set_parameters(primitive_params)\
+    builder = (
+        IoTDMJsonPrimitiveBuilder()
+        .set_communication_protocol(protocol)
+        .set_content(content)
+        .set_parameters(primitive_params)
         .set_protocol_specific_parameters(proto_specific_params)
+    )
     return builder
 
 
-def new_primitive_raw(protocol, primitive_params, content=None, proto_specific_params=None):
+def new_primitive_raw(
+    protocol, primitive_params, content=None, proto_specific_params=None
+):
     """Creates primitive object without any default data"""
-    return prepare_primitive_builder_raw(protocol, primitive_params, content, proto_specific_params).build()
-
-
-def prepare_primitive_builder(primitive_params, content=None, proto_specific_params=None,
-                              allias="default", communication=None):
+    return prepare_primitive_builder_raw(
+        protocol, primitive_params, content, proto_specific_params
+    ).build()
+
+
+def prepare_primitive_builder(
+    primitive_params,
+    content=None,
+    proto_specific_params=None,
+    allias="default",
+    communication=None,
+):
     """Creates primitive builder with default data set according communication object used"""
     communication = __get_session(allias, communication)
 
-    builder = IoTDMJsonPrimitiveBuilder()\
-        .set_communication_protocol(communication.get_protocol())\
-        .set_parameters(communication.get_primitive_params())\
-        .set_protocol_specific_parameters(communication.get_protocol_params())\
+    builder = (
+        IoTDMJsonPrimitiveBuilder()
+        .set_communication_protocol(communication.get_protocol())
+        .set_parameters(communication.get_primitive_params())
+        .set_protocol_specific_parameters(communication.get_protocol_params())
         .set_content(content)
+    )
 
     if communication.get_protocol() == onem2m_http.HTTPPROTOCOLNAME and content:
-        builder.set_proto_param(onem2m_http.http_header_content_length, str(len(content)))
+        builder.set_proto_param(
+            onem2m_http.http_header_content_length, str(len(content))
+        )
 
-    builder.append_parameters(primitive_params)\
-           .append_protocol_specific_parameters(proto_specific_params)
+    builder.append_parameters(primitive_params).append_protocol_specific_parameters(
+        proto_specific_params
+    )
 
     return builder
 
 
-def new_primitive(primitive_params, content=None, proto_specific_params=None,
-                  allias="default", communication=None):
+def new_primitive(
+    primitive_params,
+    content=None,
+    proto_specific_params=None,
+    allias="default",
+    communication=None,
+):
     """Creates new primitive object with default data set according communication object used"""
-    return prepare_primitive_builder(primitive_params, content, proto_specific_params, allias, communication).build()
+    return prepare_primitive_builder(
+        primitive_params, content, proto_specific_params, allias, communication
+    ).build()
 
 
 def _add_param(params, name, value):
@@ -82,8 +107,15 @@ def _add_param(params, name, value):
     params[name] = value
 
 
-def prepare_request_primitive_builder(target_resource, content=None, operation=None, resource_type=None,
-                                      result_content=None, allias="default", communication=None):
+def prepare_request_primitive_builder(
+    target_resource,
+    content=None,
+    operation=None,
+    resource_type=None,
+    result_content=None,
+    allias="default",
+    communication=None,
+):
     """
     Creates builder for request primitive with default data set according
     to the communication object used
@@ -100,38 +132,74 @@ def prepare_request_primitive_builder(target_resource, content=None, operation=N
 
     primitive_params = json.dumps(primitive_params)
 
-    builder = prepare_primitive_builder(primitive_params, content, communication=communication)
+    builder = prepare_primitive_builder(
+        primitive_params, content, communication=communication
+    )
     return builder
 
 
-def new_create_request_primitive(target_resource, content, resource_type, result_content=None,
-                                 allias="default", communication=None):
+def new_create_request_primitive(
+    target_resource,
+    content,
+    resource_type,
+    result_content=None,
+    allias="default",
+    communication=None,
+):
     """Creates request primitive for Create operation"""
-    return prepare_request_primitive_builder(target_resource, content, operation=OneM2M.operation_create,
-                                             resource_type=resource_type, result_content=result_content,
-                                             allias=allias, communication=communication).build()
-
-
-def new_update_request_primitive(target_resource, content, result_content=None, allias="default", communication=None):
+    return prepare_request_primitive_builder(
+        target_resource,
+        content,
+        operation=OneM2M.operation_create,
+        resource_type=resource_type,
+        result_content=result_content,
+        allias=allias,
+        communication=communication,
+    ).build()
+
+
+def new_update_request_primitive(
+    target_resource, content, result_content=None, allias="default", communication=None
+):
     """Creates request primitive for Update operation"""
-    return prepare_request_primitive_builder(target_resource, content, operation=OneM2M.operation_update,
-                                             resource_type=None, result_content=result_content,
-                                             allias=allias, communication=communication).build()
-
-
-def new_retrieve_request_primitive(target_resource, result_content=None, allias="default", communication=None):
+    return prepare_request_primitive_builder(
+        target_resource,
+        content,
+        operation=OneM2M.operation_update,
+        resource_type=None,
+        result_content=result_content,
+        allias=allias,
+        communication=communication,
+    ).build()
+
+
+def new_retrieve_request_primitive(
+    target_resource, result_content=None, allias="default", communication=None
+):
     """Creates request primitive for Retrieve operation"""
-    return prepare_request_primitive_builder(target_resource, content=None,
-                                             operation=OneM2M.operation_retrieve,
-                                             resource_type=None, result_content=result_content,
-                                             allias=allias, communication=communication).build()
-
-
-def new_delete_request_primitive(target_resource, result_content=None, allias="default", communication=None):
+    return prepare_request_primitive_builder(
+        target_resource,
+        content=None,
+        operation=OneM2M.operation_retrieve,
+        resource_type=None,
+        result_content=result_content,
+        allias=allias,
+        communication=communication,
+    ).build()
+
+
+def new_delete_request_primitive(
+    target_resource, result_content=None, allias="default", communication=None
+):
     """Creates request primitive for Delete operation"""
-    return prepare_request_primitive_builder(target_resource, content=None,
-                                             operation=OneM2M.operation_delete, result_content=result_content,
-                                             allias=allias, communication=communication).build()
+    return prepare_request_primitive_builder(
+        target_resource,
+        content=None,
+        operation=OneM2M.operation_delete,
+        result_content=result_content,
+        allias=allias,
+        communication=communication,
+    ).build()
 
 
 def send_primitive(primitive, allias="default", communication=None):
@@ -146,9 +214,13 @@ def verify_exchange(request_primitive, response_primitive, status_code=None):
     request_primitive.check_exchange(response_primitive, rsc=status_code)
 
 
-def verify_exchange_negative(request_primitive, response_primitive, status_code, error_message=None):
+def verify_exchange_negative(
+    request_primitive, response_primitive, status_code, error_message=None
+):
     """Verifies request and error response primitive parameters"""
-    request_primitive.check_exchange_negative(response_primitive, status_code, error_message)
+    request_primitive.check_exchange_negative(
+        response_primitive, status_code, error_message
+    )
 
 
 def verify_request(request_primitive):
@@ -161,7 +233,9 @@ def verify_response(response_primitive, rqi=None, rsc=None, request_operation=No
     response_primitive.check_response(rqi, rsc, request_operation)
 
 
-def verify_response_negative(response_primitive, rqi=None, rsc=None, error_message=None):
+def verify_response_negative(
+    response_primitive, rqi=None, rsc=None, error_message=None
+):
     """Verifies error response primitive only"""
     response_primitive.check_response_negative(rqi, rsc, error_message)
 
@@ -178,7 +252,9 @@ def receive_request_primitive(allias="default", communication=None):
     return req
 
 
-def respond_response_primitive(response_primitive, allias="default", communication=None):
+def respond_response_primitive(
+    response_primitive, allias="default", communication=None
+):
     """
     Sends response primitive related to the last request primitive received by
     receive_request_primitive() method
@@ -187,24 +263,39 @@ def respond_response_primitive(response_primitive, allias="default", communicati
     communication.respond(response_primitive)
 
 
-def create_notification_response(notification_request_primitive, allias="default", communication=None):
+def create_notification_response(
+    notification_request_primitive, allias="default", communication=None
+):
     """Creates response primitive for provided notification request primitive"""
     communication = __get_session(allias, communication)
-    return communication.create_auto_response(notification_request_primitive,
-                                              OneM2M.result_code_ok)
-
-
-def create_notification_response_negative(notification_request_primitive, result_code, error_message,
-                                          allias="default", communication=None):
+    return communication.create_auto_response(
+        notification_request_primitive, OneM2M.result_code_ok
+    )
+
+
+def create_notification_response_negative(
+    notification_request_primitive,
+    result_code,
+    error_message,
+    allias="default",
+    communication=None,
+):
     """Creates negative response primitive for provided notification request primitive"""
     communication = __get_session(allias, communication)
-    builder = IoTDMJsonPrimitiveBuilder() \
-        .set_communication_protocol(communication.get_protocol()) \
-        .set_param(OneM2M.short_request_identifier,
-                   notification_request_primitive.get_param(OneM2M.short_request_identifier)) \
-        .set_param(OneM2M.short_response_status_code, result_code) \
-        .set_proto_param(onem2m_http.http_result_code, onem2m_http.onem2m_to_http_result_codes[result_code])\
+    builder = (
+        IoTDMJsonPrimitiveBuilder()
+        .set_communication_protocol(communication.get_protocol())
+        .set_param(
+            OneM2M.short_request_identifier,
+            notification_request_primitive.get_param(OneM2M.short_request_identifier),
+        )
+        .set_param(OneM2M.short_response_status_code, result_code)
+        .set_proto_param(
+            onem2m_http.http_result_code,
+            onem2m_http.onem2m_to_http_result_codes[result_code],
+        )
         .set_content('{"error": "' + error_message + '"}')
+    )
     return builder.build()
 
 
@@ -237,48 +328,68 @@ def _on_subscription_create_notificaton_matching_cb(request_primitive):
 
 # Description of such notification request primitive which is received
 # as result of new subscription resource
-ON_SUBSCRIPTION_CREATE_DESCRIPTION =\
-    RequestAutoHandlingDescription(None, None, None,
-                                   onem2m_result_code=OneM2M.result_code_ok,
-                                   matching_cb=_on_subscription_create_notificaton_matching_cb)
+ON_SUBSCRIPTION_CREATE_DESCRIPTION = RequestAutoHandlingDescription(
+    None,
+    None,
+    None,
+    onem2m_result_code=OneM2M.result_code_ok,
+    matching_cb=_on_subscription_create_notificaton_matching_cb,
+)
 
 
 def _prepare_notification_auto_reply_builder():
-    return RequestAutoHandlingDescriptionBuilder()\
-        .add_param_criteria(OneM2M.short_operation, OneM2M.operation_notify)\
+    return (
+        RequestAutoHandlingDescriptionBuilder()
+        .add_param_criteria(OneM2M.short_operation, OneM2M.operation_notify)
         .set_onem2m_result_code(OneM2M.result_code_ok)
+    )
 
 
-def add_notification_auto_reply_on_subscription_create(allias="default", communication=None):
+def add_notification_auto_reply_on_subscription_create(
+    allias="default", communication=None
+):
     """Sets auto reply for notification requests received due to subscription resource creation"""
     communication = __get_session(allias, communication)
     communication.add_auto_reply_description(ON_SUBSCRIPTION_CREATE_DESCRIPTION)
 
 
-def remove_notification_auto_reply_on_subscription_create(allias="default", communication=None):
+def remove_notification_auto_reply_on_subscription_create(
+    allias="default", communication=None
+):
     """Removes auto reply for notification requests received due to subscription resource creation"""
     communication = __get_session(allias, communication)
     communication.remove_auto_reply_description(ON_SUBSCRIPTION_CREATE_DESCRIPTION)
 
 
-def get_number_of_auto_replies_on_subscription_create(allias="default", communication=None):
+def get_number_of_auto_replies_on_subscription_create(
+    allias="default", communication=None
+):
     """Returns number of auto replies on notification requests received when new subscription created"""
     communication = __get_session(allias, communication)
-    return communication.get_auto_handling_statistics(ON_SUBSCRIPTION_CREATE_DESCRIPTION).counter
+    return communication.get_auto_handling_statistics(
+        ON_SUBSCRIPTION_CREATE_DESCRIPTION
+    ).counter
 
 
-def verify_number_of_auto_replies_on_subscription_create(replies, allias="default", communication=None):
+def verify_number_of_auto_replies_on_subscription_create(
+    replies, allias="default", communication=None
+):
     """Compares number of auto replies on notifications received when new subscription created"""
     count = get_number_of_auto_replies_on_subscription_create(allias, communication)
     if replies != count:
-        raise AssertionError("Unexpected number of auto replies on subscription create: {}, expected: {}".format(
-                             count, replies))
+        raise AssertionError(
+            "Unexpected number of auto replies on subscription create: {}, expected: {}".format(
+                count, replies
+            )
+        )
 
 
 __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING = {}
 
 
-def add_auto_reply_to_notification_from_subscription(subscription_resource_id, allias="default", communication=None):
+def add_auto_reply_to_notification_from_subscription(
+    subscription_resource_id, allias="default", communication=None
+):
     """
     Sets auto reply for notifications from specific subscription resource
     identified by its CSE-relative resource ID
@@ -286,47 +397,76 @@ def add_auto_reply_to_notification_from_subscription(subscription_resource_id, a
     communication = __get_session(allias, communication)
     builder = _prepare_notification_auto_reply_builder()
     if subscription_resource_id in __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING:
-        raise RuntimeError("Auto reply for subscription resource {} already set".format(subscription_resource_id))
-
-    builder.add_content_criteria(JSON_POINTER_NOTIFICATION_SUR, subscription_resource_id)
+        raise RuntimeError(
+            "Auto reply for subscription resource {} already set".format(
+                subscription_resource_id
+            )
+        )
+
+    builder.add_content_criteria(
+        JSON_POINTER_NOTIFICATION_SUR, subscription_resource_id
+    )
     new_description = builder.build()
-    __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[subscription_resource_id] = new_description
+    __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[
+        subscription_resource_id
+    ] = new_description
     communication.add_auto_reply_description(new_description)
 
 
-def remove_auto_reply_to_notification_from_subscription(subscription_resource_id, allias="default", communication=None):
+def remove_auto_reply_to_notification_from_subscription(
+    subscription_resource_id, allias="default", communication=None
+):
     """Removes auto reply for specific subscription identified by its CSE-relative resource ID"""
     communication = __get_session(allias, communication)
-    description = __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[subscription_resource_id]
+    description = __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[
+        subscription_resource_id
+    ]
     if not description:
-        raise RuntimeError("No auto reply set for specific subscription resource: {}".format(subscription_resource_id))
+        raise RuntimeError(
+            "No auto reply set for specific subscription resource: {}".format(
+                subscription_resource_id
+            )
+        )
     communication.remove_auto_reply_description(description)
 
 
-def get_number_of_auto_replies_to_notifications_from_subscription(subscription_resource_id,
-                                                                  allias="default", communication=None):
+def get_number_of_auto_replies_to_notifications_from_subscription(
+    subscription_resource_id, allias="default", communication=None
+):
     """
     Returns number of automatic replies for specific subscription resource
     identified by its CSE-relative resource ID
     """
     communication = __get_session(allias, communication)
-    description = __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[subscription_resource_id]
+    description = __SUBSCRIPTION_RESOURCE_ID_DESCRIPTION_MAPPING[
+        subscription_resource_id
+    ]
     if not description:
-        raise RuntimeError("No auto reply set for specific subscription resource: {}".format(subscription_resource_id))
+        raise RuntimeError(
+            "No auto reply set for specific subscription resource: {}".format(
+                subscription_resource_id
+            )
+        )
     return communication.get_auto_handling_statistics(description).counter
 
 
-def verify_number_of_auto_replies_to_notification_from_subscription(subscription_resource_id, replies,
-                                                                    allias="default", communication=None):
+def verify_number_of_auto_replies_to_notification_from_subscription(
+    subscription_resource_id, replies, allias="default", communication=None
+):
     """
     Compares number of automatic replies for specific subscription resource
     identified by its CSE-relative resource ID
     """
-    count = get_number_of_auto_replies_to_notifications_from_subscription(subscription_resource_id,
-                                                                          allias, communication)
+    count = get_number_of_auto_replies_to_notifications_from_subscription(
+        subscription_resource_id, allias, communication
+    )
     if replies != count:
-        raise AssertionError(("Unexpected number of auto replies to notification from subscription {}, " +
-                              "auto replies: {}, expected: {}").format(subscription_resource_id, count, replies))
+        raise AssertionError(
+            (
+                "Unexpected number of auto replies to notification from subscription {}, "
+                + "auto replies: {}, expected: {}"
+            ).format(subscription_resource_id, count, replies)
+        )
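
Taken together, the three helpers above form a register/observe/cleanup cycle per subscription; a hedged sketch (the resource ID and the expected count are illustrative assumptions):

    add_auto_reply_to_notification_from_subscription("InCSE1/sub1")
    # ... test steps that trigger notifications for InCSE1/sub1 ...
    verify_number_of_auto_replies_to_notification_from_subscription(
        "InCSE1/sub1", 2  # expects two auto-answered notifications (assumed)
    )
    remove_auto_reply_to_notification_from_subscription("InCSE1/sub1")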
 
 
 # Primitive getters use a JSON pointer object or string
@@ -364,7 +504,9 @@ def close_iotdm_communication(allias="default", communication=None):
         del __sessions[allias]
 
 
-def create_iotdm_communication(entity_id, protocol, protocol_params=None, rx_port=None, allias="default"):
+def create_iotdm_communication(
+    entity_id, protocol, protocol_params=None, rx_port=None, allias="default"
+):
     """
     Creates communication object and starts the communication.
     :param entity_id: ID which will be used in From parameter of request primitives
@@ -375,8 +517,9 @@ def create_iotdm_communication(entity_id, protocol, protocol_params=None, rx_por
     :return: The new communication object
     """
     if protocol == onem2m_http.HTTPPROTOCOLNAME:
-        conn = IoTDMItCommunicationFactory().create_http_json_primitive_communication(entity_id, protocol,
-                                                                                      protocol_params, rx_port)
+        conn = IoTDMItCommunicationFactory().create_http_json_primitive_communication(
+            entity_id, protocol, protocol_params, rx_port
+        )
     else:
         raise RuntimeError("Unsupported protocol: {}".format(protocol))
 
@@ -409,7 +552,7 @@ def get_local_ip_from_list(iotdm_ip, local_ip_list_str):
         # TODO this is not real longest prefix match
         # TODO fix if needed
         for ip in ip_list:
-            if ip.startswith(iotdm_ip[0: i]):
+            if ip.startswith(iotdm_ip[0:i]):
                 return ip
 
     # no match, just choose the first one
@@ -420,12 +563,21 @@ def get_local_ip_from_list(iotdm_ip, local_ip_list_str):
 def create_http_default_communication_parameters(address, port, content_type):
     """Returns JSON string including default HTTP specific parameters"""
     return '{{"{}": "{}", "{}": {}, "Content-Type": "{}"}}'.format(
-        onem2m_http.protocol_address, address,
-        onem2m_http.protocol_port, port,
-        content_type)
+        onem2m_http.protocol_address,
+        address,
+        onem2m_http.protocol_port,
+        port,
+        content_type,
+    )
 
 
-def create_iotdm_http_connection(entity_id, address, port, content_type, rx_port=None, allias="default"):
+def create_iotdm_http_connection(
+    entity_id, address, port, content_type, rx_port=None, allias="default"
+):
     """Creates HTTP communication"""
-    default_params = create_http_default_communication_parameters(address, port, content_type)
-    return create_iotdm_communication(entity_id, "http", default_params, rx_port, allias)
+    default_params = create_http_default_communication_parameters(
+        address, port, content_type
+    )
+    return create_iotdm_communication(
+        entity_id, "http", default_params, rx_port, allias
+    )
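
An end-to-end sketch of how this module's helpers compose (all function names are from this file; the CSE address, ports, payload and resource type are assumptions):

    conn = create_iotdm_http_connection(
        "AE1", "127.0.0.1", 8282, "application/json", rx_port=5000
    )
    request = new_create_request_primitive(
        "InCSE1", '{"m2m:cnt": {"rn": "Container1"}}', resource_type=3  # assumed
    )
    response = send_primitive(request)
    verify_exchange(request, response)
    close_iotdm_communication()
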
index 8c2d6b00baded4f36e6c32d0faaf012592400061..89dc39c12ac8b6facf9b8d1a89bac3b28b3b39cf 100644 (file)
@@ -16,61 +16,68 @@ def create_resource(connection, parent, restype, a=None):
     else:
         x = connection.create(parent, restype, attr=a)
     if x is None:
-        raise AssertionError('Cannot create this resource')
-    elif hasattr(x, 'status_code'):
+        raise AssertionError("Cannot create this resource")
+    elif hasattr(x, "status_code"):
         if x.status_code < 200 or x.status_code > 299:
             raise AssertionError(
-                'Cannot create this resource [%d] : %s' %
-                (x.status_code, x.text))
+                "Cannot create this resource [%d] : %s" % (x.status_code, x.text)
+            )
     return x
 
+
 # this might not be necessary now that the library functions can take dicts
 
 
 def create_subscription(connection, parent, ip, port):
     uri = "http://%s:%d" % (ip, int(port))
-    x = connection.create(parent, "subscription", {
-        "notificationURI": uri,
-        "notificationContentType": "wholeResource"})
+    x = connection.create(
+        parent,
+        "subscription",
+        {"notificationURI": uri, "notificationContentType": "wholeResource"},
+    )
     if x is None:
-        raise AssertionError('Cannot create this subscription')
-    elif hasattr(x, 'status_code'):
+        raise AssertionError("Cannot create this subscription")
+    elif hasattr(x, "status_code"):
         if x.status_code < 200 or x.status_code > 299:
-            raise AssertionError('Cannot create subscription [%d] : %s' %
-                                 (x.status_code, x.text))
+            raise AssertionError(
+                "Cannot create subscription [%d] : %s" % (x.status_code, x.text)
+            )
     return x
 
 
 def retrieve_resource(connection, resid):
     x = connection.retrieve(resid)
     if x is None:
-        raise AssertionError('Cannot retrieve this resource')
-    elif hasattr(x, 'status_code'):
+        raise AssertionError("Cannot retrieve this resource")
+    elif hasattr(x, "status_code"):
         if x.status_code < 200 or x.status_code > 299:
-            raise AssertionError('Cannot retrieve this resource [%d] : %s' %
-                                 (x.status_code, x.text))
+            raise AssertionError(
+                "Cannot retrieve this resource [%d] : %s" % (x.status_code, x.text)
+            )
     return x
 
 
 def update_resource(connection, resid, attr):
     x = connection.update(resid, attr)
     if x is None:
-        raise AssertionError('Cannot update this resource')
-    elif hasattr(x, 'status_code'):
+        raise AssertionError("Cannot update this resource")
+    elif hasattr(x, "status_code"):
         if x.status_code < 200 or x.status_code > 299:
-            raise AssertionError('Cannot update this resource [%d] : %s' %
-                                 (x.status_code, x.text))
+            raise AssertionError(
+                "Cannot update this resource [%d] : %s" % (x.status_code, x.text)
+            )
     return x
 
 
 def delete_resource(connection, resid):
     x = connection.delete(resid)
     if x is None:
-        raise AssertionError('Cannot delete this resource')
-    elif hasattr(x, 'status_code'):
+        raise AssertionError("Cannot delete this resource")
+    elif hasattr(x, "status_code"):
         if x.status_code < 200 or x.status_code > 299:
-            raise AssertionError('Cannot delete this resource [%d] : %s' %
-                                 (x.status_code, x.text))
+            raise AssertionError(
+                "Cannot delete this resource [%d] : %s" % (x.status_code, x.text)
+            )
     return x
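
All four wrappers above repeat the same None/status-code check. One possible consolidation, sketched here as a hypothetical helper rather than anything this patch introduces:

def _check(x, verb):
    """Raise AssertionError when x is None or carries a non-2xx status_code."""
    if x is None:
        raise AssertionError("Cannot %s this resource" % verb)
    if hasattr(x, "status_code") and not (200 <= x.status_code <= 299):
        raise AssertionError(
            "Cannot %s this resource [%d] : %s" % (verb, x.status_code, x.text)
        )
    return x

# delete_resource, for example, would then reduce to:
#     return _check(connection.delete(resid), "delete")
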
 
 
index 94caad67bcdf70cf4f6f16be91e98f5a5ccf36d0..bb1f9bd6e7209e6912c72f9b7dfcb54b3cfbaf84 100644 (file)
@@ -2,8 +2,9 @@ import json
 import pyangbind.lib.pybindJSON as pbJ
 import sys
 import os
+
 # Bindings must present in ${WORKSPACE}
-workspace = os.environ['WORKSPACE'] + '/odl-lispflowmapping-yang-files'
+workspace = os.environ["WORKSPACE"] + "/odl-lispflowmapping-yang-files"
 
 """Helper Functions """
 
@@ -59,7 +60,7 @@ def copy_eid(objA, objB):
          objB: eid object of pyangbind generated class
     """
     for name in dir(objB):
-        if name[:4] == '_eid':
+        if name[:4] == "_eid":
             value = getattr(objB, name)
             try:
                 setattr(objA, name, value)
@@ -75,7 +76,7 @@ def copy_rloc(objA, objB):
          objB: rloc object of pyangbind generated class
     """
     for name in dir(objB):
-        if name[:5] == '_rloc':
+        if name[:5] == "_rloc":
             value = getattr(objB, name)
             try:
                 setattr(objA, name, value)
@@ -91,12 +92,12 @@ def clean_hops(obj):
     """
     new_obj = {}
     for key, value in obj.items():
-        if key == 'hop':
+        if key == "hop":
             for hop in value:
-                values = hop['hop-id'].split(' ')
-                hop['hop-id'] = values[0] + " " + values[1]
-                if values[2] != '':
-                    hop['lrs-bits'] = ' '.join(values[2:])[:-1]
+                values = hop["hop-id"].split(" ")
+                hop["hop-id"] = values[0] + " " + values[1]
+                if values[2] != "":
+                    hop["lrs-bits"] = " ".join(values[2:])[:-1]
                 new_obj[key] = value
         if isinstance(value, dict):
             new_obj[key] = clean_hops(value)
@@ -126,80 +127,85 @@ def Get_LispAddress_Object(eid_string, vni=None, laddr_obj=None):
     """
     if laddr_obj is None:
         sys.path.insert(0, workspace)
-        from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import input
+        from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import (
+            input,
+        )
+
         rpc_input = input()
         laddr_obj = rpc_input.mapping_record.eid
 
     if vni:
         laddr_obj.virtual_network_id = vni
 
-    eid_string = eid_string.split(':')
-    prefix, text = eid_string[0], ':'.join(eid_string[1:])
+    eid_string = eid_string.split(":")
+    prefix, text = eid_string[0], ":".join(eid_string[1:])
     if prefix:
-        if prefix == 'srcdst':
+        if prefix == "srcdst":
             # Example: srcdst:192.0.2.1/32|192.0.2.2/32
-            laddr_obj.address_type = 'laddr:source-dest-key-lcaf'
-            text = text.split('|')
+            laddr_obj.address_type = "laddr:source-dest-key-lcaf"
+            text = text.split("|")
             laddr_obj.source_dest_key.source = text[0]
             laddr_obj.source_dest_key.dest = text[1]
-        elif prefix == 'no':
+        elif prefix == "no":
             # Example: no:
-            laddr_obj.address_type = 'laddr:no-address-afi'
-        elif prefix == 'ipv4':
-            if '/' in text:
+            laddr_obj.address_type = "laddr:no-address-afi"
+        elif prefix == "ipv4":
+            if "/" in text:
                 # Case: ipv4-prefix
-                laddr_obj.address_type = 'laddr:ipv4-prefix-afi'
+                laddr_obj.address_type = "laddr:ipv4-prefix-afi"
                 laddr_obj.ipv4_prefix = text
             else:
                 # Case: ipv4
-                laddr_obj.address_type = 'laddr:ipv4-afi'
+                laddr_obj.address_type = "laddr:ipv4-afi"
                 laddr_obj.ipv4 = text
-        elif prefix == 'ipv6':
-            if '/' in text:
+        elif prefix == "ipv6":
+            if "/" in text:
                 # Case: ipv6-prefix
-                laddr_obj.address_type = 'laddr:ipv6-prefix-afi'
+                laddr_obj.address_type = "laddr:ipv6-prefix-afi"
                 laddr_obj.ipv6_prefix = text
             else:
-                laddr_obj.address_type = 'laddr:ipv6-afi'
+                laddr_obj.address_type = "laddr:ipv6-afi"
                 laddr_obj.ipv6 = text
-        elif prefix == 'mac':
+        elif prefix == "mac":
             # Example: mac:00:00:5E:00:53:00
-            laddr_obj.address_type = 'laddr:mac-afi'
+            laddr_obj.address_type = "laddr:mac-afi"
             laddr_obj.mac = text
-        elif prefix == 'dn':
+        elif prefix == "dn":
             # Example: dn:stringAsIs
-            laddr_obj.address_type = 'laddr:distinguished-name-afi'
+            laddr_obj.address_type = "laddr:distinguished-name-afi"
             laddr_obj.distinguished_name = text
-        elif prefix == 'as':
+        elif prefix == "as":
             # Example: as:AS64500
-            laddr_obj.address_type = 'laddr:as-number-afi'
+            laddr_obj.address_type = "laddr:as-number-afi"
             laddr_obj.as_number = text
-        elif prefix == 'list':
+        elif prefix == "list":
             # Example: list:{192.0.2.1,192.0.2.2,2001:db8::1}
-            laddr_obj.address_type = 'laddr:afi-list-lcaf'
-            list_elements = text[1:len(text) - 1].split(',')  # removed start and end braces
+            laddr_obj.address_type = "laddr:afi-list-lcaf"
+            list_elements = text[1 : len(text) - 1].split(
+                ","
+            )  # removed start and end braces
             laddr_obj.afi_list.address_list = list_elements
-        elif prefix == 'appdata':
+        elif prefix == "appdata":
             # Example: appdata:192.0.2.1!128!17!80-81!6667-7000
-            laddr_obj.address_type = 'laddr:application-data-lcaf'
-            text = text.split('!')
+            laddr_obj.address_type = "laddr:application-data-lcaf"
+            text = text.split("!")
             laddr_obj.application_data.address = text[0]
             laddr_obj.application_data.ip_tos = text[1]
             laddr_obj.application_data.protocol = text[2]
-            local_ports = text[3].split('-')
+            local_ports = text[3].split("-")
             laddr_obj.application_data.local_port_low = local_ports[0]
             laddr_obj.application_data.local_port_high = local_ports[1]
-            remote_ports = text[4].split('-')
+            remote_ports = text[4].split("-")
             laddr_obj.application_data.remote_port_low = remote_ports[0]
             laddr_obj.application_data.remote_port_high = remote_ports[1]
-        elif prefix == 'elp':
+        elif prefix == "elp":
             # TODO: BITS_TYPE_for_lps
             # Example: elp:{192.0.2.1->192.0.2.2|lps->192.0.2.3}
-            laddr_obj.address_type = 'laddr:explicit-locator-path-lcaf'
-            text = text[1:len(text) - 1]
-            text = text.split('->')  # all the hops
+            laddr_obj.address_type = "laddr:explicit-locator-path-lcaf"
+            text = text[1 : len(text) - 1]
+            text = text.split("->")  # all the hops
             for i in range(0, len(text)):
-                cur_hop = text[i].split('|')
+                cur_hop = text[i].split("|")
                 address = cur_hop[0]
                 lrs_bits = ""
                 hop_id = "Hop " + str(i + 1) + " " + lrs_bits
@@ -213,16 +219,16 @@ def Get_LispAddress_Object(eid_string, vni=None, laddr_obj=None):
                         lrs_bits += "strict "
                 laddr_obj.explicit_locator_path.hop.add(hop_id)
                 laddr_obj.explicit_locator_path.hop[hop_id].address = address
-        elif prefix == 'kv':
+        elif prefix == "kv":
             # Example: kv:192.0.2.1->192.0.2.2
-            laddr_obj.address_type = 'laddr:key-value-address-lcaf'
-            text = text.split('->')
+            laddr_obj.address_type = "laddr:key-value-address-lcaf"
+            text = text.split("->")
             laddr_obj.key_value_address.key = text[0]
             laddr_obj.key_value_address.value = text[1]
-        elif prefix == 'sp':
+        elif prefix == "sp":
             # Example: sp:42(3)
-            laddr_obj.address_type = 'laddr:service-path-lcaf'
-            text = text.split('(')
+            laddr_obj.address_type = "laddr:service-path-lcaf"
+            text = text.split("(")
             laddr_obj.service_path.service_path_id = text[0]
             laddr_obj.service_path.service_index = text[1][:-1]
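
The prefix grammar handled above (srcdst:, no:, ipv4:, mac:, and so on) is easiest to see in isolation. A standalone sketch of just two of the branches, free of the pyangbind dependency; the returned dictionaries are illustrative, not the YANG binding's shape:

def parse_eid(eid_string):
    prefix, _, text = eid_string.partition(":")
    if prefix == "srcdst":
        source, dest = text.split("|")
        return {"type": "source-dest-key", "source": source, "dest": dest}
    if prefix == "ipv4":
        kind = "ipv4-prefix" if "/" in text else "ipv4"
        return {"type": kind, "address": text}
    raise ValueError("unsupported prefix: " + prefix)

print(parse_eid("srcdst:192.0.2.1/32|192.0.2.2/32"))
print(parse_eid("ipv4:192.0.2.1"))
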
 
@@ -236,8 +242,10 @@ def Get_LispAddress_JSON(eid_string, vni=None):
          eid_string: type of lisp address
          vni: virtual network id
     """
-    pbj_dump = pbJ.dumps(Get_LispAddress_Object(eid_string, vni), filter=True, mode="ietf")
-    out_dump = '{"eid":' + pbj_dump + '}'
+    pbj_dump = pbJ.dumps(
+        Get_LispAddress_Object(eid_string, vni), filter=True, mode="ietf"
+    )
+    out_dump = '{"eid":' + pbj_dump + "}"
     return Clean_JSON(out_dump)
 
 
@@ -248,7 +256,9 @@ def Get_LispAddress_Noeid_JSON(eid_string, vni=None):
          eid_string: type of lisp address
          vni: virtual network id
     """
-    out_dump = pbJ.dumps(Get_LispAddress_Object(eid_string, vni), filter=True, mode="ietf")
+    out_dump = pbJ.dumps(
+        Get_LispAddress_Object(eid_string, vni), filter=True, mode="ietf"
+    )
     return Clean_JSON(out_dump)
 
 
@@ -262,7 +272,7 @@ def Get_LispAddress_JSON_And_Wrap_input(eid_string, vni=None):
     return Wrap_input(Get_LispAddress_JSON(eid_string, vni))
 
 
-def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
+def Get_LocatorRecord_Object(rloc, weights="1/1/255/0", flags=0o01, loc_id="ISP1"):
     """ Description: Returns locator record object from pyangbind generated classes
         Returns: locator record object
         Params:
@@ -272,12 +282,15 @@ def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1
          loc_id: id of locator record object
     """
     sys.path.insert(0, workspace)
-    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import input
+    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import (
+        input,
+    )
+
     rpc_input = input()
     lrecord_obj = rpc_input.mapping_record.LocatorRecord
     # TODO: What should be the locator-id
     lrecord_obj.add(loc_id)
-    lrecord_ele = weights.split('/')
+    lrecord_ele = weights.split("/")
     lrecord_obj[loc_id].priority = lrecord_ele[0]
     lrecord_obj[loc_id].weight = lrecord_ele[1]
     lrecord_obj[loc_id].multicastPriority = lrecord_ele[2]
@@ -290,7 +303,7 @@ def Get_LocatorRecord_Object(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1
     return lrecord_obj
 
 
-def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1"):
+def Get_LocatorRecord_JSON(rloc, weights="1/1/255/0", flags=0o01, loc_id="ISP1"):
     """ Description: Returns locator record dictionary
         Returns: python dictionary
         Params:
@@ -299,14 +312,20 @@ def Get_LocatorRecord_JSON(rloc, weights='1/1/255/0', flags=0o01, loc_id="ISP1")
          flags: Three bit parameter in the sequence routed->rlocProbed->routed
          loc_id: id of locator record object
     """
-    pbj_dump = pbJ.dumps(Get_LocatorRecord_Object(rloc, weights, flags, loc_id), filter=True, mode="default")
+    pbj_dump = pbJ.dumps(
+        Get_LocatorRecord_Object(rloc, weights, flags, loc_id),
+        filter=True,
+        mode="default",
+    )
     pbj_dict = json.loads(pbj_dump)
-    pbj_dict[loc_id]['rloc'] = Get_LispAddress_Noeid_JSON(rloc)
-    out_dump = '{"LocatorRecord":' + str(pbj_dict) + '}'
+    pbj_dict[loc_id]["rloc"] = Get_LispAddress_Noeid_JSON(rloc)
+    out_dump = '{"LocatorRecord":' + str(pbj_dict) + "}"
     return Clean_JSON(out_dump)
 
 
-def Get_MappingRecord_Object(eid, locators, ttl=1440, authoritative=True, action='NoAction'):
+def Get_MappingRecord_Object(
+    eid, locators, ttl=1440, authoritative=True, action="NoAction"
+):
     """ Description: Returns mapping record object from pyangbind generated classes.
         Returns: mapping record object
         Params:
@@ -317,7 +336,10 @@ def Get_MappingRecord_Object(eid, locators, ttl=1440, authoritative=True, action
          action: action
     """
     sys.path.insert(0, workspace)
-    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import input
+    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_mapping.input import (
+        input,
+    )
+
     rpc_input = input()
     mrecord_obj = rpc_input.mapping_record
     mrecord_obj.recordTtl = ttl
@@ -347,7 +369,9 @@ def Get_MappingRecord_Object(eid, locators, ttl=1440, authoritative=True, action
     return mrecord_obj
 
 
-def Get_MappingRecord_JSON(eid, locators, ttl=1440, authoritative=True, action='NoAction'):
+def Get_MappingRecord_JSON(
+    eid, locators, ttl=1440, authoritative=True, action="NoAction"
+):
     """ Description: Returns mapping record dictionary
         Returns: python dictionary
         Params:
@@ -357,8 +381,12 @@ def Get_MappingRecord_JSON(eid, locators, ttl=1440, authoritative=True, action='
          authoritative: authoritative
          action: action
     """
-    pbj_dump = pbJ.dumps(Get_MappingRecord_Object(eid, locators, ttl, authoritative, action), filter=True, mode="ietf")
-    out_dump = '{"mapping-record":' + pbj_dump + '}'
+    pbj_dump = pbJ.dumps(
+        Get_MappingRecord_Object(eid, locators, ttl, authoritative, action),
+        filter=True,
+        mode="ietf",
+    )
+    out_dump = '{"mapping-record":' + pbj_dump + "}"
     return Clean_JSON(out_dump)
 
 
@@ -370,7 +398,10 @@ def Get_MappingAuthkey_Object(key_string="password", key_type=1):
          key_type: key type
     """
     sys.path.insert(0, workspace)
-    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_key.input import input as add_key_input
+    from LISPFlowMappingYANGBindings.odl_mappingservice_rpc.add_key.input import (
+        input as add_key_input,
+    )
+
     rpc_input = add_key_input()
     authkey_obj = rpc_input.mapping_authkey
     authkey_obj.key_string = key_string
@@ -385,6 +416,8 @@ def Get_MappingAuthkey_JSON(key_string="password", key_type=1):
          key_string: key string
          key_type: key type
     """
-    pbj_dump = pbJ.dumps(Get_MappingAuthkey_Object(key_string, key_type), filter=True, mode="default")
-    out_dump = '{"mapping-authkey":' + pbj_dump + '}'
+    pbj_dump = pbJ.dumps(
+        Get_MappingAuthkey_Object(key_string, key_type), filter=True, mode="default"
+    )
+    out_dump = '{"mapping-authkey":' + pbj_dump + "}"
     return Clean_JSON(out_dump)
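
The '{"mapping-authkey":' + pbj_dump + "}" concatenations above assume pbj_dump is already a valid JSON object. The same wrapping can be done with the json module; a standalone sketch in which the pbj_dump value is illustrative only:

import json

pbj_dump = '{"key-string": "password", "key-type": 1}'
out_dump = json.dumps({"mapping-authkey": json.loads(pbj_dump)})
print(out_dump)  # {"mapping-authkey": {"key-string": "password", "key-type": 1}}
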
index c366072e603231e7d38ed86f3e0df86aa596d1e9..d6ceb5c8318fa72962741253937e8eb356ddf8d3 100644 (file)
@@ -26,10 +26,15 @@ def _send_http_request_thread_impl(rqueue, prefix_id, url, data, http_timeout):
     :param http_timeout: http response timeout
     :type http_timeout: int
     """
-    logger.info('rpc invoked with details: {}'.format(data))
+    logger.info("rpc invoked with details: {}".format(data))
     try:
-        resp = requests.post(url=url, headers={'Content-Type': 'application/xml'},
-                             data=data, auth=('admin', 'admin'), timeout=http_timeout)
+        resp = requests.post(
+            url=url,
+            headers={"Content-Type": "application/xml"},
+            data=data,
+            auth=("admin", "admin"),
+            timeout=http_timeout,
+        )
     except Exception as exc:
         resp = exc
         logger.debug(exc)
@@ -50,26 +55,35 @@ def _initiate_rpcs(host_list, index_list, url_templ, data_templ, subst_dict):
     :param subst_dict: dictionary with key value pairs to be used with template
     :type subst_dict: dict
     """
-    resqueue = _globals.pop('result_queue', queue.Queue())
-    lthreads = _globals.pop('threads', [])
+    resqueue = _globals.pop("result_queue", queue.Queue())
+    lthreads = _globals.pop("threads", [])
     for i, host in enumerate(host_list):
-        url = url_templ.substitute({'HOST': host})
-        timeout = int(subst_dict['DURATION']) + 3 * 125 + 10
-        prefix_id = subst_dict['ID_PREFIX'] + str(index_list[i])
-        subst_dict['ID'] = prefix_id
+        url = url_templ.substitute({"HOST": host})
+        timeout = int(subst_dict["DURATION"]) + 3 * 125 + 10
+        prefix_id = subst_dict["ID_PREFIX"] + str(index_list[i])
+        subst_dict["ID"] = prefix_id
         data = data_templ.substitute(subst_dict)
-        logger.info('url: {}, data: {}, timeout: {}'.format(url, data, timeout))
-        t = threading.Thread(target=_send_http_request_thread_impl,
-                             args=(resqueue, prefix_id, url, data, timeout))
+        logger.info("url: {}, data: {}, timeout: {}".format(url, data, timeout))
+        t = threading.Thread(
+            target=_send_http_request_thread_impl,
+            args=(resqueue, prefix_id, url, data, timeout),
+        )
         t.daemon = True
         t.start()
         lthreads.append(t)
 
-    _globals.update({'threads': lthreads, 'result_queue': resqueue})
+    _globals.update({"threads": lthreads, "result_queue": resqueue})
 
 
-def start_write_transactions_on_nodes(host_list, index_list, id_prefix, duration, rate, chained_flag=False,
-                                      reset_globals=True):
+def start_write_transactions_on_nodes(
+    host_list,
+    index_list,
+    id_prefix,
+    duration,
+    rate,
+    chained_flag=False,
+    reset_globals=True,
+):
     """Invoke write-transactions rpc on given nodes.
 
     :param host_list: IP addresses of odl nodes
@@ -92,21 +106,38 @@ def start_write_transactions_on_nodes(host_list, index_list, id_prefix, duration
 
     logger.info(
         "Input parameters: host_list:{}, index_list:{}, id_prefix:{}, duration:{}, rate:{}, chained_flag:{}".format(
-            host_list, index_list, id_prefix, duration, rate, chained_flag))
-    datat = string.Template('''<input xmlns="tag:opendaylight.org,2017:controller:yang:lowlevel:control">
+            host_list, index_list, id_prefix, duration, rate, chained_flag
+        )
+    )
+    datat = string.Template(
+        """<input xmlns="tag:opendaylight.org,2017:controller:yang:lowlevel:control">
   <id>$ID</id>
   <seconds>$DURATION</seconds>
   <transactions-per-second>$RATE</transactions-per-second>
   <chained-transactions>$CHAINED_FLAG</chained-transactions>
-</input>''')
-    subst_dict = {'ID_PREFIX': id_prefix, 'DURATION': duration,
-                  'RATE': rate, 'CHAINED_FLAG': 'true' if chained_flag else 'false'}
-    urlt = string.Template('''http://$HOST:8181/restconf/operations/odl-mdsal-lowlevel-control:write-transactions''')
+</input>"""
+    )
+    subst_dict = {
+        "ID_PREFIX": id_prefix,
+        "DURATION": duration,
+        "RATE": rate,
+        "CHAINED_FLAG": "true" if chained_flag else "false",
+    }
+    urlt = string.Template(
+        """http://$HOST:8181/restconf/operations/odl-mdsal-lowlevel-control:write-transactions"""
+    )
     _initiate_rpcs(host_list, index_list, urlt, datat, subst_dict)
 
 
-def start_produce_transactions_on_nodes(host_list, index_list, id_prefix,
-                                        duration, rate, isolated_transactions_flag=False, reset_globals=True):
+def start_produce_transactions_on_nodes(
+    host_list,
+    index_list,
+    id_prefix,
+    duration,
+    rate,
+    isolated_transactions_flag=False,
+    reset_globals=True,
+):
     """Invoke produce-transactions rpcs on given nodes.
 
     :param host_list: IP addresses of odl nodes
@@ -128,18 +159,27 @@ def start_produce_transactions_on_nodes(host_list, index_list, id_prefix,
         _globals.clear()
 
     msg = "host_list:{}, index_list:{} ,id_prefix:{}, duration:{}, rate:{}, isolated_transactions:{}".format(
-        host_list, index_list, id_prefix, duration, rate, isolated_transactions_flag)
+        host_list, index_list, id_prefix, duration, rate, isolated_transactions_flag
+    )
     msg = "Input parameters: " + msg
     logger.info(msg)
-    datat = string.Template('''<input xmlns="tag:opendaylight.org,2017:controller:yang:lowlevel:control">
+    datat = string.Template(
+        """<input xmlns="tag:opendaylight.org,2017:controller:yang:lowlevel:control">
   <id>$ID</id>
   <seconds>$DURATION</seconds>
   <transactions-per-second>$RATE</transactions-per-second>
   <isolated-transactions>$ISOLATED_TRANSACTIONS</isolated-transactions>
-</input>''')
-    subst_dict = {'ID_PREFIX': id_prefix, 'DURATION': duration, 'RATE': rate,
-                  'ISOLATED_TRANSACTIONS': 'true' if isolated_transactions_flag else 'false'}
-    urlt = string.Template('''http://$HOST:8181/restconf/operations/odl-mdsal-lowlevel-control:produce-transactions''')
+</input>"""
+    )
+    subst_dict = {
+        "ID_PREFIX": id_prefix,
+        "DURATION": duration,
+        "RATE": rate,
+        "ISOLATED_TRANSACTIONS": "true" if isolated_transactions_flag else "false",
+    }
+    urlt = string.Template(
+        """http://$HOST:8181/restconf/operations/odl-mdsal-lowlevel-control:produce-transactions"""
+    )
     _initiate_rpcs(host_list, index_list, urlt, datat, subst_dict)
 
 
@@ -149,8 +189,8 @@ def wait_for_transactions():
     :return: list of triples; triple consists of response time, prefix identifier and response object
     :rtype: list[(str, str, requests.Response)]
     """
-    lthreads = _globals.pop('threads')
-    resqueue = _globals.pop('result_queue')
+    lthreads = _globals.pop("threads")
+    resqueue = _globals.pop("result_queue")
 
     for t in lthreads:
         t.join()
@@ -172,7 +212,7 @@ def get_next_transactions_response():
     :return: None or a triple consisting of response time, prefix identifier and response object
     :rtype: (str, str, requests.Response)
     """
-    resqueue = _globals.get('result_queue')
+    resqueue = _globals.get("result_queue")
 
     if not resqueue.empty():
         rsp = resqueue.get()
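
The module above follows a start/collect pattern: worker threads push (time, prefix id, response) triples onto a shared queue, and wait_for_transactions drains it after joining. The pattern in miniature, with no real HTTP involved:

import queue
import threading

results = queue.Queue()

def worker(ident):
    # stands in for _send_http_request_thread_impl
    results.put(("<time>", "prefix-%d" % ident, "<response>"))

threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print([results.get() for _ in range(results.qsize())])
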
index 9019ab1adc7fda782d0ea6cfc176dcd8d4c46040..1dc01a91cef4566abac8efc78392d6fec890f8ff 100644 (file)
@@ -24,10 +24,16 @@ __created__ = "19 March 2014"
 
 if len(sys.argv) < 5:
     print("Please povide correct inputs. Exiting!!!")
-    print("{0}  <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
-          <base_ip: Eg:75.75.0.0>".format(sys.argv[0].split('/')[-1]))
-    print("Dpid of switches is derived from base mac and \
-           host ip address is derived from base ip")
+    print(
+        "{0}  <switch_count> <host_per_switch> <base_mac: Eg:00:4b:00:00:00:00 > \
+          <base_ip: Eg:75.75.0.0>".format(
+            sys.argv[0].split("/")[-1]
+        )
+    )
+    print(
+        "Dpid of switches is derived from base mac and \
+           host ip address is derived from base ip"
+    )
     sys.exit(1)
 
 switch_count = int(sys.argv[1])
@@ -35,11 +41,11 @@ host_per_switch = int(sys.argv[2])
 base_mac = sys.argv[3]
 base_host_ip = sys.argv[4]
 
-base_host_mac = base_mac.split(':')
-base_host_mac[0] = '10'
-base_host_mac = (':').join(base_host_mac)
-dpid_mac = base_mac.split(':')
-dpid_mac = ('').join(dpid_mac)
+base_host_mac = base_mac.split(":")
+base_host_mac[0] = "10"
+base_host_mac = (":").join(base_host_mac)
+dpid_mac = base_mac.split(":")
+dpid_mac = ("").join(dpid_mac)
 
 
 def new_mac(mac, offset):
@@ -53,7 +59,7 @@ def new_mac(mac, offset):
     """
     mac = netaddr.EUI(mac).value
     mac = mac + offset
-    mac = str(netaddr.EUI(mac)).replace('-', ':')
+    mac = str(netaddr.EUI(mac)).replace("-", ":")
     return mac
 
 
@@ -79,9 +85,9 @@ def new_dpid(mac, offset):
     """
     mac = netaddr.EUI(mac).value
     mac = mac + offset
-    mac = str(netaddr.EUI(mac)).replace('-', ':')
-    dpid_mac = mac.split(':')
-    dpid_mac = ('').join(dpid_mac)
+    mac = str(netaddr.EUI(mac)).replace("-", ":")
+    dpid_mac = mac.split(":")
+    dpid_mac = ("").join(dpid_mac)
     DPID = "0000" + dpid_mac
     return DPID
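
Both new_mac and new_dpid lean on netaddr.EUI for the MAC arithmetic. A quick standalone check of the offset behaviour (requires the netaddr package; the rendered case depends on netaddr's dialect):

import netaddr

mac = netaddr.EUI("00:4b:00:00:00:00").value + 5
print(str(netaddr.EUI(mac)).replace("-", ":"))  # -> 00:4B:00:00:00:05
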
 
@@ -91,34 +97,47 @@ if __name__ == "__main__":
     HMAC = new_mac(base_host_mac, 1)
     HIP = new_ip(base_host_ip, 1)
     prefix = 8
-    configfile = open("switch.py", 'w')
-    configfile.write('\"\"\"@author: sandeep gangadharan\n             \
+    configfile = open("switch.py", "w")
+    configfile.write(
+        '"""@author: sandeep gangadharan\n             \
     This topology has {0:d} switches {1:d} hosts                       \
     \nThis topology is made out of {2:s} script                        \
     \nThis is a fully mesh topology. Not available in mininet by default.\
-    \nHence generating this python file dynamically\"\"\"     \
+    \nHence generating this python file dynamically"""     \
     \nfrom mininet.topo import Topo\nclass DemoTopo(Topo):          \
-    \n'.format(switch_count, switch_count * host_per_switch, sys.argv[0]))
-    print("This topology has %d switches %d hosts"
-          % (switch_count, switch_count * host_per_switch))
+    \n'.format(
+            switch_count, switch_count * host_per_switch, sys.argv[0]
+        )
+    )
+    print(
+        "This topology has %d switches %d hosts"
+        % (switch_count, switch_count * host_per_switch)
+    )
     configfile.write("    def __init__(self):\n ")
     configfile.write("        #  Initialize topology\n")
     configfile.write("        Topo.__init__(self)\n")
     configfile.write("        #  Add Switches\n")
     # Add switches
     for i in range(1, switch_count + 1):
-        configfile.write("        s{0:d} = self.addSwitch(\'s{1:d}\',dpid=\'{2:s}\')\
-            \n".format(i, i, DPID))
+        configfile.write(
+            "        s{0:d} = self.addSwitch('s{1:d}',dpid='{2:s}')\
+            \n".format(
+                i, i, DPID
+            )
+        )
         DPID = new_dpid(base_mac, i + 1)
 
     # Add hosts
     configfile.write("        #  Add Hosts\n")
     for i in range(1, switch_count + 1):
         for j in range(1, host_per_switch + 1):
-            configfile.write("        self.addLink(s{0:d}, \
+            configfile.write(
+                "        self.addLink(s{0:d}, \
                 self.addHost('s{1:d}h{2:d}',\
-                ip='{3:s}',mac='{4:s}',prefixLen='{5:d}'))\n"
-                             .format(i, i, j, HIP, HMAC, prefix))
+                ip='{3:s}',mac='{4:s}',prefixLen='{5:d}'))\n".format(
+                    i, i, j, HIP, HMAC, prefix
+                )
+            )
             HMAC = new_mac(HMAC, 1)
             HIP = new_ip(HIP, 1)
 
@@ -130,7 +149,11 @@ if __name__ == "__main__":
             continue
         for j in range(1, i + 1):
             if i != j:
-                configfile.write("        self.addLink(s{0:d}, s{1:d})\
-                \n".format(i, j))
+                configfile.write(
+                    "        self.addLink(s{0:d}, s{1:d})\
+                \n".format(
+                        i, j
+                    )
+                )
     configfile.write("topos = { 'demotopo': ( lambda: DemoTopo() ) }")
     configfile.close()
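
The pair loop emitted above wires every switch pair exactly once, so a full mesh of n switches gets n*(n-1)/2 links. A standalone check of that count:

def mesh_links(n):
    # mirrors the generated loop: each (i, j) with j < i is linked once
    return [(i, j) for i in range(2, n + 1) for j in range(1, i)]

print(len(mesh_links(4)), mesh_links(4))
# 6 [(2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)]
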
index f72bb87758599a11d97cc7d44831bc1ebbad25ed..4e12a1742a20fb8edccaaf5b4f785c45cbf7c4a0 100644 (file)
@@ -20,12 +20,12 @@ class PathpolicyTopo(Topo):
         Topo.__init__(self)
 
         # Add hosts and switches
-        leftHost = self.addHost('h1')
-        rightHost = self.addHost('h2')
-        leftSwitch = self.addSwitch('s1')
-        middleSwitch = self.addSwitch('s2')
-        middleSwitch2 = self.addSwitch('s4')
-        rightSwitch = self.addSwitch('s3')
+        leftHost = self.addHost("h1")
+        rightHost = self.addHost("h2")
+        leftSwitch = self.addSwitch("s1")
+        middleSwitch = self.addSwitch("s2")
+        middleSwitch2 = self.addSwitch("s4")
+        rightSwitch = self.addSwitch("s3")
 
         # Add links
         self.addLink(leftHost, leftSwitch)
@@ -36,4 +36,4 @@ class PathpolicyTopo(Topo):
         self.addLink(rightSwitch, rightHost)
 
 
-topos = {'pathpolicytopo': (lambda: PathpolicyTopo())}
+topos = {"pathpolicytopo": (lambda: PathpolicyTopo())}
index be3caa877d9668452db568379f2c6ee0fb35f4e5..cd7fa6f9c40d678c5bc9288824af5cd885dda0d1 100644 (file)
@@ -15,13 +15,13 @@ class VLANHost(Host):
 
         intf = self.defaultIntf()
         # remove IP from default, "physical" interface
-        self.cmd('ifconfig %s inet 0' % intf)
+        self.cmd("ifconfig %s inet 0" % intf)
         # create VLAN interface
-        self.cmd('vconfig add %s %d' % (intf, vlan))
+        self.cmd("vconfig add %s %d" % (intf, vlan))
         # assign the host's IP to the VLAN interface
-        self.cmd('ifconfig %s.%d inet %s' % (intf, vlan, params['ip']))
+        self.cmd("ifconfig %s.%d inet %s" % (intf, vlan, params["ip"]))
         # update the intf name and host's intf map
-        new_name = '%s.%d' % (intf, vlan)
+        new_name = "%s.%d" % (intf, vlan)
         # update the (Mininet) interface to refer to VLAN interface name
         intf.name = new_name
         # add VLAN interface to host's name to intf map
@@ -40,16 +40,16 @@ class VlanTopo(Topo):
         Topo.__init__(self)
 
         # Add hosts and switches
-        host1 = self.addHost('h1', cls=VLANHost, vlan=200)
-        host2 = self.addHost('h2', cls=VLANHost, vlan=300)
-        host3 = self.addHost('h3', cls=VLANHost, vlan=200)
-        host4 = self.addHost('h4', cls=VLANHost, vlan=300)
-        host5 = self.addHost('h5', cls=VLANHost, vlan=200)
-        host6 = self.addHost('h6', cls=VLANHost, vlan=300)
+        host1 = self.addHost("h1", cls=VLANHost, vlan=200)
+        host2 = self.addHost("h2", cls=VLANHost, vlan=300)
+        host3 = self.addHost("h3", cls=VLANHost, vlan=200)
+        host4 = self.addHost("h4", cls=VLANHost, vlan=300)
+        host5 = self.addHost("h5", cls=VLANHost, vlan=200)
+        host6 = self.addHost("h6", cls=VLANHost, vlan=300)
 
-        s1 = self.addSwitch('s1')
-        s2 = self.addSwitch('s2')
-        s3 = self.addSwitch('s3')
+        s1 = self.addSwitch("s1")
+        s2 = self.addSwitch("s2")
+        s3 = self.addSwitch("s3")
 
         self.addLink(s1, s2)
         self.addLink(s2, host1)
@@ -61,4 +61,4 @@ class VlanTopo(Topo):
         self.addLink(s3, host6)
 
 
-topos = {'vlantopo': (lambda: VlanTopo())}
+topos = {"vlantopo": (lambda: VlanTopo())}
index 97629c0cbba74c7c78c80cc2f21711fe4bd99bee..b671cff960a18af8c08812f0464d66abcfe06888 100755 (executable)
@@ -7,28 +7,28 @@ __email__ = "jose.luis.franco.arza@ericsson.com"
 
 
 def get_network_from_cidr(cidr):
-    '''
+    """
     Returns the subnetwork part from a given subnet in CIDR format,
     like 192.168.1.0/24. Returning 192.168.1.0.
-    '''
+    """
     o = ipaddr.IPv4Network(cidr)
     return str(o.network)
 
 
 def get_mask_from_cidr(cidr):
-    '''
+    """
     Returns a subnet mask from a given subnet in CIDR format,
     like 192.168.1.0/24. Returning 255.255.255.0.
-    '''
+    """
     o = ipaddr.IPv4Network(cidr)
     return str(o.netmask)
 
 
 def get_ip_address_first_octets(ip, n_octets):
-    '''
+    """
     Given an IP address, this function returns the number
     of octets determined as argument. If 4 are specified, then the output
     is the whole IP
-    '''
+    """
 
-    return ".".join(ip.split(".")[:int(n_octets)])
+    return ".".join(ip.split(".")[: int(n_octets)])
index e328d3aff86d15b7f233712452a977a440715f59..612d38ae4f9651f246bcaa50de88a914b76f36ea 100644 (file)
@@ -1,11 +1,11 @@
-'''
+"""
 The purpose of this library is the ability to spread configured flows
 over the specified tables and switches.
 
 The idea of how to configure and check inventory operational data is taken from
 ../../../../tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
 ../../../../tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
-'''
+"""
 import random
 import threading
 import netaddr
@@ -30,49 +30,40 @@ class Counter(object):
         return val
 
 
-_spreads = ['gauss', 'linear', 'first']    # possible defined spreads at the moment
+_spreads = ["gauss", "linear", "first"]  # possible defined spreads at the moment
 _default_flow_template_json = {  # template used for config datastore
-    u'flow': [
+    u"flow": [
         {
-            u'hard-timeout': 65000,
-            u'idle-timeout': 65000,
-            u'cookie_mask': 4294967295,
-            u'flow-name': u'FLOW-NAME-TEMPLATE',
-            u'priority': 2,
-            u'strict': False,
-            u'cookie': 0,
-            u'table_id': 0,
-            u'installHw': False,
-            u'id': u'FLOW-ID-TEMPLATE',
-            u'match': {
-                u'ipv4-destination': u'0.0.0.0/32',
-                u'ethernet-match': {
-                    u'ethernet-type': {
-                        u'type': 2048
-                    }
-                }
+            u"hard-timeout": 65000,
+            u"idle-timeout": 65000,
+            u"cookie_mask": 4294967295,
+            u"flow-name": u"FLOW-NAME-TEMPLATE",
+            u"priority": 2,
+            u"strict": False,
+            u"cookie": 0,
+            u"table_id": 0,
+            u"installHw": False,
+            u"id": u"FLOW-ID-TEMPLATE",
+            u"match": {
+                u"ipv4-destination": u"0.0.0.0/32",
+                u"ethernet-match": {u"ethernet-type": {u"type": 2048}},
             },
-            u'instructions': {
-                u'instruction': [
+            u"instructions": {
+                u"instruction": [
                     {
-                        u'order': 0,
-                        u'apply-actions': {
-                            u'action': [
-                                {
-                                    u'drop-action': {},
-                                    u'order': 0
-                                }
-                            ]
-                        }
+                        u"order": 0,
+                        u"apply-actions": {
+                            u"action": [{u"drop-action": {}, u"order": 0}]
+                        },
                     }
                 ]
-            }
+            },
         }
     ]
 }
 
 
-_node_tmpl = "/opendaylight-inventory:nodes/opendaylight-inventory:node[opendaylight-inventory:id=\"openflow:{0}\"]"
+_node_tmpl = '/opendaylight-inventory:nodes/opendaylight-inventory:node[opendaylight-inventory:id="openflow:{0}"]'
 
 
 _default_operations_item_json = {  # template used for sal operations
@@ -86,28 +77,21 @@ _default_operations_item_json = {  # template used for sal operations
                 "hard-timeout": 65000,
                 "idle-timeout": 65000,
                 "instructions": {
-                    "instruction": [{
-                        "apply-actions": {
-                            "action": [
-                                {
-                                    "drop-action": {},
-                                    "order": 0
-                                }
-                            ]
-                        },
-                        "order": 0
-                    }]
+                    "instruction": [
+                        {
+                            "apply-actions": {
+                                "action": [{"drop-action": {}, "order": 0}]
+                            },
+                            "order": 0,
+                        }
+                    ]
                 },
                 "match": {
                     "ipv4-destination": "0.0.0.0/32",
-                    "ethernet-match": {
-                        "ethernet-type": {
-                            "type": 2048
-                        }
-                    },
+                    "ethernet-match": {"ethernet-type": {"type": 2048}},
                 },
                 "priority": 2,
-                "table_id": 0
+                "table_id": 0,
             }
         ]
     }
@@ -126,41 +110,45 @@ def _get_notes(fldet=[]):
     notes = {}
     for (sw, tab, flow) in fldet:
         if sw not in notes:
-            notes[sw] = {'total': 0}
+            notes[sw] = {"total": 0}
         if tab not in notes[sw]:
             notes[sw][tab] = 0
         notes[sw][tab] += 1
-        notes[sw]['total'] += 1
+        notes[sw]["total"] += 1
     return notes
 
 
 def _randomize(spread, maxn):
     """Returns a randomized switch or table id"""
     if spread not in _spreads:
-        raise Exception('Spread method {} not available'.format(spread))
+        raise Exception("Spread method {} not available".format(spread))
     while True:
-        if spread == 'gauss':
+        if spread == "gauss":
             ga = abs(random.gauss(0, 1))
             rv = int(ga * float(maxn) / 3)
             if rv < maxn:
                 return rv
-        elif spread == 'linear':
+        elif spread == "linear":
             rv = int(random.random() * float(maxn))
             if rv < maxn:
                 return rv
             else:
-                raise ValueError('rv >= maxn')
-        elif spread == 'first':
+                raise ValueError("rv >= maxn")
+        elif spread == "first":
             return 0
 
 
-def generate_new_flow_details(flows=10, switches=1, swspread='gauss', tables=250, tabspread='gauss'):
+def generate_new_flow_details(
+    flows=10, switches=1, swspread="gauss", tables=250, tabspread="gauss"
+):
     """Generate a list of tupples (switch_id, table_id, flow_id) which are generated
     according to the spread rules between swithces and tables.
     It also returns a dictionary with statsistics."""
     swflows = [_randomize(swspread, switches) for f in range(int(flows))]
     # we have to increase the switch index because mininet starts indexing switches from 1 (not 0)
-    fltables = [(s + 1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)]
+    fltables = [
+        (s + 1, _randomize(tabspread, tables), idx) for idx, s in enumerate(swflows)
+    ]
     notes = _get_notes(fltables)
     return fltables, notes
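
The default "gauss" spread folds abs(random.gauss(0, 1)) into [0, maxn), which skews flows toward low switch and table ids. A standalone look at that skew:

import random

def gauss_bucket(maxn):
    # same rejection loop as _randomize with spread="gauss"
    while True:
        rv = int(abs(random.gauss(0, 1)) * float(maxn) / 3)
        if rv < maxn:
            return rv

random.seed(1)
sample = [gauss_bucket(10) for _ in range(1000)]
print(sum(1 for v in sample if v < 3) / 1000.0)  # roughly 0.63 land in the first 3 buckets
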
 
@@ -182,20 +170,25 @@ def _prepare_add(cntl, method, flows, template=None):
     """
     fl1 = flows[0]
     sw, tab, fl, ip = fl1
-    url = 'http://' + cntl + ':' + '8181'
-    url += '/restconf/config/opendaylight-inventory:nodes/node/openflow:' + str(sw)
-    url += '/table/' + str(tab) + '/flow/' + str(fl)
-    flow = copy.deepcopy(template['flow'][0])
-    flow['cookie'] = fl
-    flow['flow-name'] = 'TestFlow-%d' % fl
-    flow['id'] = str(fl)
-    flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ip))
-    flow['table_id'] = tab
+    url = "http://" + cntl + ":" + "8181"
+    url += "/restconf/config/opendaylight-inventory:nodes/node/openflow:" + str(sw)
+    url += "/table/" + str(tab) + "/flow/" + str(fl)
+    flow = copy.deepcopy(template["flow"][0])
+    flow["cookie"] = fl
+    flow["flow-name"] = "TestFlow-%d" % fl
+    flow["id"] = str(fl)
+    flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
+    flow["table_id"] = tab
     fmod = dict(template)
-    fmod['flow'] = flow
+    fmod["flow"] = flow
     req_data = json.dumps(fmod)
-    req = requests.Request('PUT', url, headers={'Content-Type': 'application/json'}, data=req_data,
-                           auth=('admin', 'admin'))
+    req = requests.Request(
+        "PUT",
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("admin", "admin"),
+    )
     return req
 
 
@@ -216,22 +209,32 @@ def _prepare_table_add(cntl, method, flows, template=None):
     """
     fl1 = flows[0]
     sw, tab, fl, ip = fl1
-    url = 'http://' + cntl + ':' + '8181'
-    url += '/restconf/config/opendaylight-inventory:nodes/node/openflow:' + str(sw) + '/table/' + str(tab)
+    url = "http://" + cntl + ":" + "8181"
+    url += (
+        "/restconf/config/opendaylight-inventory:nodes/node/openflow:"
+        + str(sw)
+        + "/table/"
+        + str(tab)
+    )
     fdets = []
     for sw, tab, fl, ip in flows:
-        flow = copy.deepcopy(template['flow'][0])
-        flow['cookie'] = fl
-        flow['flow-name'] = 'TestFlow-%d' % fl
-        flow['id'] = str(fl)
-        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ip))
-        flow['table_id'] = tab
+        flow = copy.deepcopy(template["flow"][0])
+        flow["cookie"] = fl
+        flow["flow-name"] = "TestFlow-%d" % fl
+        flow["id"] = str(fl)
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
+        flow["table_id"] = tab
         fdets.append(flow)
     fmod = copy.deepcopy(template)
-    fmod['flow'] = fdets
+    fmod["flow"] = fdets
     req_data = json.dumps(fmod)
-    req = requests.Request('POST', url, headers={'Content-Type': 'application/json'}, data=req_data,
-                           auth=('admin', 'admin'))
+    req = requests.Request(
+        "POST",
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("admin", "admin"),
+    )
     return req
 
 
@@ -252,10 +255,15 @@ def _prepare_delete(cntl, method, flows, template=None):
     """
     fl1 = flows[0]
     sw, tab, fl, ip = fl1
-    url = 'http://' + cntl + ':' + '8181'
-    url += '/restconf/config/opendaylight-inventory:nodes/node/openflow:' + str(sw)
-    url += '/table/' + str(tab) + '/flow/' + str(fl)
-    req = requests.Request('DELETE', url, headers={'Content-Type': 'application/json'}, auth=('admin', 'admin'))
+    url = "http://" + cntl + ":" + "8181"
+    url += "/restconf/config/opendaylight-inventory:nodes/node/openflow:" + str(sw)
+    url += "/table/" + str(tab) + "/flow/" + str(fl)
+    req = requests.Request(
+        "DELETE",
+        url,
+        headers={"Content-Type": "application/json"},
+        auth=("admin", "admin"),
+    )
     return req
 
 
@@ -276,21 +284,26 @@ def _prepare_rpc_item(cntl, method, flows, template=None):
     """
     f1 = flows[0]
     sw, tab, fl, ip = f1
-    url = 'http://' + cntl + ':' + '8181/restconf/operations/sal-bulk-flow:' + method
+    url = "http://" + cntl + ":" + "8181/restconf/operations/sal-bulk-flow:" + method
     fdets = []
     for sw, tab, fl, ip in flows:
-        flow = copy.deepcopy(template['input']['bulk-flow-item'][0])
-        flow['node'] = _node_tmpl.format(sw)
-        flow['cookie'] = fl
-        flow['flow-name'] = 'TestFlow-%d' % fl
-        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ip))
-        flow['table_id'] = tab
+        flow = copy.deepcopy(template["input"]["bulk-flow-item"][0])
+        flow["node"] = _node_tmpl.format(sw)
+        flow["cookie"] = fl
+        flow["flow-name"] = "TestFlow-%d" % fl
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
+        flow["table_id"] = tab
         fdets.append(flow)
     fmod = copy.deepcopy(template)
-    fmod['input']['bulk-flow-item'] = fdets
+    fmod["input"]["bulk-flow-item"] = fdets
     req_data = json.dumps(fmod)
-    req = requests.Request('POST', url, headers={'Content-Type': 'application/json'}, data=req_data,
-                           auth=('admin', 'admin'))
+    req = requests.Request(
+        "POST",
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("admin", "admin"),
+    )
     return req
 
 
@@ -313,28 +326,42 @@ def _prepare_ds_item(cntl, method, flows, template=None):
     """
     f1 = flows[0]
     sw, tab, fl, ip = f1
-    url = 'http://' + cntl + ':' + '8181/restconf/operations/sal-bulk-flow:' + method
+    url = "http://" + cntl + ":" + "8181/restconf/operations/sal-bulk-flow:" + method
     fdets = []
     for sw, tab, fl, ip in flows:
-        flow = copy.deepcopy(template['input']['bulk-flow-item'][0])
-        flow['node'] = _node_tmpl.format(sw)
-        flow['cookie'] = fl
-        flow['flow-name'] = 'TestFlow-%d' % fl
-        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ip))
-        flow['table_id'] = tab
-        flow['flow-id'] = fl
+        flow = copy.deepcopy(template["input"]["bulk-flow-item"][0])
+        flow["node"] = _node_tmpl.format(sw)
+        flow["cookie"] = fl
+        flow["flow-name"] = "TestFlow-%d" % fl
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
+        flow["table_id"] = tab
+        flow["flow-id"] = fl
         fdets.append(flow)
     fmod = copy.deepcopy(template)
-    del fmod['input']['bulk-flow-item']
-    fmod['input']['bulk-flow-ds-item'] = fdets
+    del fmod["input"]["bulk-flow-item"]
+    fmod["input"]["bulk-flow-ds-item"] = fdets
     req_data = json.dumps(fmod)
-    req = requests.Request('POST', url, headers={'Content-Type': 'application/json'}, data=req_data,
-                           auth=('admin', 'admin'))
+    req = requests.Request(
+        "POST",
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("admin", "admin"),
+    )
     return req
 
 
-def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, controllers=[], restport='',
-                       template=None, outqueue=None, method=None):
+def _wt_request_sender(
+    thread_id,
+    preparefnc,
+    inqueue=None,
+    exitevent=None,
+    controllers=[],
+    restport="",
+    template=None,
+    outqueue=None,
+    method=None,
+):
     """The funcion sends http requests.
 
     Runs in the working thread. It reads out flow details from the queue and sends apropriate http requests
@@ -390,8 +417,15 @@ def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, cont
     outqueue.put(res)
 
 
-def _task_executor(method='', flow_template=None, flow_details=[], controllers=['127.0.0.1'],
-                   restport='8181', nrthreads=1, fpr=1):
+def _task_executor(
+    method="",
+    flow_template=None,
+    flow_details=[],
+    controllers=["127.0.0.1"],
+    restport="8181",
+    nrthreads=1,
+    fpr=1,
+):
     """The main function which drives sending of http requests.
 
     Creates 2 queues and the requested number of 'working threads'. One queue is filled with flow details and working
@@ -417,25 +451,27 @@ def _task_executor(method='', flow_template=None, flow_details=[], controllers=[
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
     # TODO: multi controllers support
-    ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')))
+    ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")))
 
     # choose message prepare function
-    if method == 'PUT':
+    if method == "PUT":
         preparefnc = _prepare_add
         # put can contain only 1 flow, let's overwrite any value of flows per request
         fpr = 1
-    elif method == 'POST':
+    elif method == "POST":
         preparefnc = _prepare_table_add
-    elif method == 'DELETE':
+    elif method == "DELETE":
         preparefnc = _prepare_delete
         # delete flow can contain only 1 flow, let's overwrite any value of flows per request
         fpr = 1
-    elif method in ['add-flows-ds', 'remove-flows-ds']:
+    elif method in ["add-flows-ds", "remove-flows-ds"]:
         preparefnc = _prepare_ds_item
-    elif method in ['add-flows-rpc', 'remove-flows-rpc']:
+    elif method in ["add-flows-rpc", "remove-flows-rpc"]:
         preparefnc = _prepare_rpc_item
     else:
-        raise NotImplementedError('Method {0} does not have it\'s prepeare function defined'.format(method))
+        raise NotImplementedError(
+            "Method {0} does not have it's prepeare function defined".format(method)
+        )
 
     # let's enlarge the tuple of flow details with IP, to be used with the template
     flows = [(sw, tab, flo, ip_addr.increment()) for sw, tab, flo in flow_details]
@@ -455,8 +491,8 @@ def _task_executor(method='', flow_template=None, flow_details=[], controllers=[
     sendqueue = queue.Queue()
     for flowgroup, flow_list in flowgroups.items():
         while len(flow_list) > 0:
-            sendqueue.put(flow_list[:int(fpr)])
-            flow_list = flow_list[int(fpr):]
+            sendqueue.put(flow_list[: int(fpr)])
+            flow_list = flow_list[int(fpr) :]
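
The while loop above slices each group's flow list into chunks of at most fpr flows per request. The chunking on its own:

def chunks(lst, fpr):
    out = []
    while len(lst) > 0:
        out.append(lst[: int(fpr)])
        lst = lst[int(fpr) :]
    return out

print(chunks(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]
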
 
     # result queue
     resultqueue = queue.Queue()
@@ -466,10 +502,19 @@ def _task_executor(method='', flow_template=None, flow_details=[], controllers=[
     # let's start threads which will read flow details from queues and send
     threads = []
     for i in range(int(nrthreads)):
-        thr = threading.Thread(target=_wt_request_sender, args=(i, preparefnc),
-                               kwargs={"inqueue": sendqueue, "exitevent": exitevent,
-                                       "controllers": controllers, "restport": restport,
-                                       "template": flow_template, "outqueue": resultqueue, "method": method})
+        thr = threading.Thread(
+            target=_wt_request_sender,
+            args=(i, preparefnc),
+            kwargs={
+                "inqueue": sendqueue,
+                "exitevent": exitevent,
+                "controllers": controllers,
+                "restport": restport,
+                "template": flow_template,
+                "outqueue": resultqueue,
+                "method": method,
+            },
+        )
         threads.append(thr)
         thr.start()
 
@@ -504,7 +549,9 @@ def configure_flows(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='PUT', flow_template=_default_flow_template_json, **kwargs)
+    return _task_executor(
+        method="PUT", flow_template=_default_flow_template_json, **kwargs
+    )
 
 
 def deconfigure_flows(*args, **kwargs):
@@ -522,7 +569,9 @@ def deconfigure_flows(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='DELETE', flow_template=_default_flow_template_json, **kwargs)
+    return _task_executor(
+        method="DELETE", flow_template=_default_flow_template_json, **kwargs
+    )
 
 
 def configure_flows_bulk(*args, **kwargs):
@@ -540,7 +589,9 @@ def configure_flows_bulk(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='POST', flow_template=_default_flow_template_json, **kwargs)
+    return _task_executor(
+        method="POST", flow_template=_default_flow_template_json, **kwargs
+    )
 
 
 def operations_add_flows_ds(*args, **kwargs):
@@ -558,7 +609,9 @@ def operations_add_flows_ds(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='add-flows-ds', flow_template=_default_operations_item_json, **kwargs)
+    return _task_executor(
+        method="add-flows-ds", flow_template=_default_operations_item_json, **kwargs
+    )
 
 
 def operations_remove_flows_ds(*args, **kwargs):
@@ -576,7 +629,9 @@ def operations_remove_flows_ds(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='remove-flows-ds', flow_template=_default_operations_item_json, **kwargs)
+    return _task_executor(
+        method="remove-flows-ds", flow_template=_default_operations_item_json, **kwargs
+    )
 
 
 def operations_add_flows_rpc(*args, **kwargs):
@@ -594,7 +649,9 @@ def operations_add_flows_rpc(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='add-flows-rpc', flow_template=_default_operations_item_json, **kwargs)
+    return _task_executor(
+        method="add-flows-rpc", flow_template=_default_operations_item_json, **kwargs
+    )
 
 
 def operations_remove_flows_rpc(*args, **kwargs):
@@ -612,7 +669,9 @@ def operations_remove_flows_rpc(*args, **kwargs):
     Returns:
         :returns dict: dictionary of http response counts like {'http_status_code1: 'count1', etc.}
     """
-    return _task_executor(method='remove-flows-rpc', flow_template=_default_operations_item_json, **kwargs)
+    return _task_executor(
+        method="remove-flows-rpc", flow_template=_default_operations_item_json, **kwargs
+    )
 
 
 def _get_operational_inventory_of_switches(controller):
@@ -624,21 +683,30 @@ def _get_operational_inventory_of_switches(controller):
     Returns:
         :returns switches: number of switches connected
     """
-    url = 'http://' + controller + ':8181/restconf/operational/opendaylight-inventory:nodes'
-    rsp = requests.get(url, headers={'Accept': 'application/json'}, stream=False, auth=('admin', 'admin'))
+    url = (
+        "http://"
+        + controller
+        + ":8181/restconf/operational/opendaylight-inventory:nodes"
+    )
+    rsp = requests.get(
+        url,
+        headers={"Accept": "application/json"},
+        stream=False,
+        auth=("admin", "admin"),
+    )
     if rsp.status_code != 200:
         return None
     inv = json.loads(rsp.content)
-    if 'nodes' not in inv:
+    if "nodes" not in inv:
         return None
-    if 'node' not in inv['nodes']:
+    if "node" not in inv["nodes"]:
         return []
-    inv = inv['nodes']['node']
-    switches = [sw for sw in inv if 'openflow:' in sw['id']]
+    inv = inv["nodes"]["node"]
+    switches = [sw for sw in inv if "openflow:" in sw["id"]]
     return switches
 
 
-def flow_stats_collected(controller=''):
+def flow_stats_collected(controller=""):
     """Provides the operational inventory counts counts of switches and flows.
 
     Args:
@@ -653,16 +721,25 @@ def flow_stats_collected(controller=''):
     if switches is None:
         return 0, 0, 0
     for sw in switches:
-        tabs = sw['flow-node-inventory:table']
+        tabs = sw["flow-node-inventory:table"]
         for t in tabs:
-            active_flows += t['opendaylight-flow-table-statistics:flow-table-statistics']['active-flows']
-            if 'flow' in t:
-                found_flows += len(t['flow'])
-    print(("Switches,ActiveFlows(reported)/FlowsFound", len(switches), active_flows, found_flows))
+            active_flows += t[
+                "opendaylight-flow-table-statistics:flow-table-statistics"
+            ]["active-flows"]
+            if "flow" in t:
+                found_flows += len(t["flow"])
+    print(
+        (
+            "Switches,ActiveFlows(reported)/FlowsFound",
+            len(switches),
+            active_flows,
+            found_flows,
+        )
+    )
     return len(switches), active_flows, found_flows
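For example (the controller address is hypothetical):

    # unpack the (switch count, reported active flows, flows found) triple
    sw_count, active, found = flow_stats_collected(controller="127.0.0.1")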
 
 
-def get_switches_count(controller=''):
+def get_switches_count(controller=""):
     """Gives the count of the switches presnt in the operational inventory nodes datastore.
 
     Args:
index c80ac876509619346e2378ea55a62c5e2ddc97b1..7e22ade8d8b991638bb0fb1ef3ef9bc9068a1e05 100644 (file)
@@ -59,14 +59,24 @@ def getAddCarPersonUrl(hostname, port):
 
 def getBuyCarRpcUrl(hostname, port):
     """POST URL for buy car rpc"""
-    return "http://" + hostname + ":" + port + "/restconf/operations/car-purchase:buy-car"
+    return (
+        "http://" + hostname + ":" + port + "/restconf/operations/car-purchase:buy-car"
+    )
 
 
 def getJolokiaURL(hostname, port, shardIndex, shardName):
     """GET URL for jolokia"""
-    return "http://" + hostname + ":" + port + \
-        "/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-" + \
-        shardIndex + "-" + shardName + ",type=DistributedConfigDatastore"
+    return (
+        "http://"
+        + hostname
+        + ":"
+        + port
+        + "/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-"
+        + shardIndex
+        + "-"
+        + shardName
+        + ",type=DistributedConfigDatastore"
+    )
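A worked example of the rebuilt concatenation, with hypothetical host, port and shard values:

    url = getJolokiaURL("10.0.0.5", "8181", "1", "shard-default-config")
    # http://10.0.0.5:8181/jolokia/read/org.opendaylight.controller:Category=Shards,
    #     name=member-1-shard-default-config,type=DistributedConfigDatastore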
 
 
 # Template for Car init resource payload
@@ -83,7 +93,8 @@ add_car_init_payload_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for Car resource payload
 add_car_payload_template = Template(
@@ -98,7 +109,8 @@ add_car_payload_template = Template(
             }
         ]
     }
-    """)
+    """
+)
 
 # Template for Person resource payload
 add_person_payload_template = Template(
@@ -114,7 +126,8 @@ add_person_payload_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for Car Person mapping payload
 add_car_person_template = Template(
@@ -127,7 +140,8 @@ add_car_person_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for adding person using RPC
 add_person_rpc_payload_template = Template(
@@ -142,7 +156,8 @@ add_person_rpc_payload_template = Template(
                 "people:age":"$age"
             }
     }
-    """)
+    """
+)
 
 # Template for buying car rpc
 buy_car_rpc_template = Template(
@@ -155,4 +170,5 @@ buy_car_rpc_template = Template(
             "car-purchase:car-id" : "$carId"
         }
     }
-    """)
+    """
+)
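Rendering sketch for the payload templates above; only $carId is visible in the truncated template body, so safe_substitute() is used to leave any elided placeholders untouched:

    # assumes the string.Template instances above are in scope
    print(buy_car_rpc_template.safe_substitute({"carId": "1"}))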
index 2cdcb3a5133a4d6c54a3df761c4000a9ae78c890..71619b92654381b3e88cb6c2ea978e95282ae703 100644 (file)
@@ -8,26 +8,26 @@ import xml.etree.ElementTree as ElementTree
 
 
 class BaseSwitch(object):
-    '''
+    """
     Switch Base Class
-    '''
+    """
 
-    make = ''
-    model = ''
+    make = ""
+    model = ""
 
-    mgmt_protocol = ''
-    ssh_key = ''
-    mgmt_ip = ''
-    mgmt_port = ''
-    mgmt_user = ''
-    mgmt_password = ''
-    mgmt_prompt = ''
+    mgmt_protocol = ""
+    ssh_key = ""
+    mgmt_ip = ""
+    mgmt_port = ""
+    mgmt_user = ""
+    mgmt_password = ""
+    mgmt_prompt = ""
 
-    connection_index = ''
+    connection_index = ""
 
-    initialization_type = ''
+    initialization_type = ""
 
-    of_controller_ip = ''
+    of_controller_ip = ""
 
     connection_configs = []
 
@@ -44,16 +44,16 @@ class BaseSwitch(object):
 
     dump_all_flows = []
 
-    src_mac = ''
-    dst_mac = ''
-    ip_src = ''
-    ip_dst = ''
-    table_id = ''
-    action = ''
+    src_mac = ""
+    dst_mac = ""
+    ip_src = ""
+    ip_dst = ""
+    table_id = ""
+    action = ""
 
-    datapath_id_output_string = ''
-    datapath_id_output_command = ''
-    datapath_id = ''
+    datapath_id_output_string = ""
+    datapath_id_output_command = ""
+    datapath_id = ""
 
     def set_connection_index(self, idx):
         self.connection_index = idx
@@ -78,40 +78,46 @@ class BaseSwitch(object):
 
     def create_flow_match_elements(self, flow_xml):
         flow_tree = ElementTree.fromstring(flow_xml)
-        self.table_id = flow_tree.\
-            find('{urn:opendaylight:flow:inventory}table_id').text
-        instructions_element = flow_tree.\
-            find('{urn:opendaylight:flow:inventory}instructions')
-        instruction_element = instructions_element.\
-            find('{urn:opendaylight:flow:inventory}instruction')
-        apply_actions = instruction_element.\
-            find('{urn:opendaylight:flow:inventory}apply-actions')
-        action = apply_actions.\
-            find('{urn:opendaylight:flow:inventory}action')
-        output_action = action.\
-            find('{urn:opendaylight:flow:inventory}output-action')
-        output_node_connector = \
-            output_action.find('{urn:opendaylight:'
-                               'flow:inventory}output-node-connector')
+        self.table_id = flow_tree.find("{urn:opendaylight:flow:inventory}table_id").text
+        instructions_element = flow_tree.find(
+            "{urn:opendaylight:flow:inventory}instructions"
+        )
+        instruction_element = instructions_element.find(
+            "{urn:opendaylight:flow:inventory}instruction"
+        )
+        apply_actions = instruction_element.find(
+            "{urn:opendaylight:flow:inventory}apply-actions"
+        )
+        action = apply_actions.find("{urn:opendaylight:flow:inventory}action")
+        output_action = action.find("{urn:opendaylight:flow:inventory}output-action")
+        output_node_connector = output_action.find(
+            "{urn:opendaylight:" "flow:inventory}output-node-connector"
+        )
         self.action = output_node_connector.text
-        match_element = flow_tree.\
-            find('{urn:opendaylight:flow:inventory}match')
-        ethernet_match_element = match_element.\
-            find('{urn:opendaylight:flow:inventory}ethernet-match')
-        ethernet_source = ethernet_match_element.\
-            find('{urn:opendaylight:flow:inventory}ethernet-source')
-        ethernet_source_address = ethernet_source.\
-            find('{urn:opendaylight:flow:inventory}address')
+        match_element = flow_tree.find("{urn:opendaylight:flow:inventory}match")
+        ethernet_match_element = match_element.find(
+            "{urn:opendaylight:flow:inventory}ethernet-match"
+        )
+        ethernet_source = ethernet_match_element.find(
+            "{urn:opendaylight:flow:inventory}ethernet-source"
+        )
+        ethernet_source_address = ethernet_source.find(
+            "{urn:opendaylight:flow:inventory}address"
+        )
         self.src_mac = ethernet_source_address.text
-        ethernet_destination = ethernet_match_element.\
-            find('{urn:opendaylight:flow:inventory}ethernet-destination')
-        ethernet_destination_address = ethernet_destination.\
-            find('{urn:opendaylight:flow:inventory}address')
+        ethernet_destination = ethernet_match_element.find(
+            "{urn:opendaylight:flow:inventory}ethernet-destination"
+        )
+        ethernet_destination_address = ethernet_destination.find(
+            "{urn:opendaylight:flow:inventory}address"
+        )
         self.dst_mac = ethernet_destination_address.text
-        self.ip_src = match_element.\
-            find('{urn:opendaylight:flow:inventory}ipv4-source').text
-        self.ip_dst = match_element.\
-            find('{urn:opendaylight:flow:inventory}ipv4-destination').text
+        self.ip_src = match_element.find(
+            "{urn:opendaylight:flow:inventory}ipv4-source"
+        ).text
+        self.ip_dst = match_element.find(
+            "{urn:opendaylight:flow:inventory}ipv4-destination"
+        ).text
 
     def convert_hex_to_decimal_as_string(self, hex_string):
         # TODO: need to add error checking in case the hex_string is
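The "{uri}localname" strings above are ElementTree's Clark notation for namespaced elements; a minimal runnable illustration:

    import xml.etree.ElementTree as ElementTree

    NS = "{urn:opendaylight:flow:inventory}"
    flow_xml = (
        '<flow xmlns="urn:opendaylight:flow:inventory">'
        "<table_id>2</table_id>"
        "</flow>"
    )
    flow_tree = ElementTree.fromstring(flow_xml)
    print(flow_tree.find(NS + "table_id").text)  # 2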
index 49c7438b1882a8100433a233bd66370c223ce833..57541d279067e91c7a2c1804c97eab6e62740dab 100644 (file)
@@ -8,98 +8,112 @@ from BaseSwitch import BaseSwitch
 
 
 class H3C(BaseSwitch):
-    '''
+    """
     H3C Super Class
-    '''
+    """
 
-    make = 'h3c'
-    model = ''
+    make = "h3c"
+    model = ""
 
-    mgmt_protocol = 'telnet'
-    mgmt_ip = ''
-    mgmt_port = ''
-    mgmt_prompt = '(' + model + '.*>|' + model + '.*])'
+    mgmt_protocol = "telnet"
+    mgmt_ip = ""
+    mgmt_port = ""
+    mgmt_prompt = "(" + model + ".*>|" + model + ".*])"
 
-    initialization_type = 'reboot'
+    initialization_type = "reboot"
 
-    of_controller_ip = ''
-    of_instance_id = '21'
+    of_controller_ip = ""
+    of_instance_id = "21"
 
     @property
     def connection_configs(self):
-        return ['\r\r\r']
+        return ["\r\r\r"]
 
     @property
     def initialization_cmds(self):
-        return ['\rstartup saved-configuration odl_test_startup_config.cfg main\r',
-                'reboot\r',
-                'Y\r',
-                '\r',
-                'N\r',
-                'Y\r']
+        return [
+            "\rstartup saved-configuration odl_test_startup_config.cfg main\r",
+            "reboot\r",
+            "Y\r",
+            "\r",
+            "N\r",
+            "Y\r",
+        ]
 
     @property
     def cleanup_cmds(self):
-        return ['system-view',
-                'undo openflow instance ' + self.of_instance_id,
-                'return']
+        return [
+            "system-view",
+            "undo openflow instance " + self.of_instance_id,
+            "return",
+        ]
 
     @property
     def base_openflow_config(self):
-        return ['system-view',
-                'openflow instance ' + self.of_instance_id,
-                'classification vlan 1',
-                'controller ' + self.of_instance_id + ' address ip ' + self.of_controller_ip,
-                'active instance',
-                'return']
+        return [
+            "system-view",
+            "openflow instance " + self.of_instance_id,
+            "classification vlan 1",
+            "controller "
+            + self.of_instance_id
+            + " address ip "
+            + self.of_controller_ip,
+            "active instance",
+            "return",
+        ]
 
     @property
     def openflow_enable_config(self):
-        return ['system-view',
-                'openflow instance ' + self.of_instance_id,
-                'classification vlan 1',
-                'active instance',
-                'return']
+        return [
+            "system-view",
+            "openflow instance " + self.of_instance_id,
+            "classification vlan 1",
+            "active instance",
+            "return",
+        ]
 
     @property
     def openflow_validation_cmd(self):
-        return 'display openflow summary'
+        return "display openflow summary"
 
     @property
     def openflow_enable_validations(self):
-        return [self.of_instance_id + ' +Active',
-                'Connected   1          24        N']
+        return [self.of_instance_id + " +Active", "Connected   1          24        N"]
 
     @property
     def openflow_disable_config(self):
-        return ['system-view',
-                'openflow instance ' + self.of_instance_id,
-                'undo classification',
-                'active instance',
-                'return']
+        return [
+            "system-view",
+            "openflow instance " + self.of_instance_id,
+            "undo classification",
+            "active instance",
+            "return",
+        ]
 
     @property
     def openflow_disable_validations(self):
-        return [self.of_instance_id + ' +Inactive  - +- +- +- +-']
+        return [self.of_instance_id + " +Inactive  - +- +- +- +-"]
 
     @property
     def dump_all_flows(self):
-        return ['']
+        return [""]
 
     @property
     def datapath_id_output_command(self):
-        return 'display openflow summary | include 0x'
+        return "display openflow summary | include 0x"
 
-    datapath_id_output_string = ''
-    datapath_id = ''
+    datapath_id_output_string = ""
+    datapath_id = ""
 
     def update_datapath_id(self):
         if not self.datapath_id_output_string:
-            self.datapath_id = 'unknown'
+            self.datapath_id = "unknown"
         else:
             # 21    Active    0x0015cc3e5f42ad23  Connected   1          24        N
             # |---------------------------------(0)---------------------------------|
             # |------(1)-------||------(2)-----|
-            matches = re.search('(.*0x)(\w+) +Connected', self.datapath_id_output_string)
+            matches = re.search(
+                "(.*0x)(\w+) +Connected", self.datapath_id_output_string
+            )
             datapath_id_hex = matches.group(2)
             self.datapath_id = self.convert_hex_to_decimal_as_string(datapath_id_hex)
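Worked example of the extraction above, reusing the sample line from the comment and assuming convert_hex_to_decimal_as_string() is a plain base-16 conversion:

    import re

    line = "21    Active    0x0015cc3e5f42ad23  Connected   1          24        N"
    datapath_id_hex = re.search(r"(.*0x)(\w+) +Connected", line).group(2)
    print(int(datapath_id_hex, 16))  # decimal form of 0x0015cc3e5f42ad23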
index 33e9f98039016fd3c750723e85393dbc92cea322..7ffaa01d867a7b4dc4da438c596c736e14a4781e 100644 (file)
@@ -7,8 +7,8 @@ from H3C import H3C
 
 
 class H3C_5920(H3C):
-    '''
+    """
     Comware 5920
-    '''
+    """
 
-    model = '5920'
+    model = "5920"
index 5ec9ce5ef411dcc0deeccef18bd4b2b0f624a60b..5c8745d6f3339e726ac9e87b2dd9b1d9c3aaa7ba 100644 (file)
@@ -8,30 +8,32 @@ from BaseSwitch import BaseSwitch
 
 
 class Ovs(BaseSwitch):
-    '''
+    """
     OpenVswitch Class
-    '''
+    """
 
-    make = 'OpenVswitch'
-    model = 'OVS'
+    make = "OpenVswitch"
+    model = "OVS"
 
-    mgmt_protocol = 'ssh'
-    mgmt_ip = ''
-    mgmt_port = ''
-    mgmt_user = 'mininet'
-    mgmt_password = 'mininet'
+    mgmt_protocol = "ssh"
+    mgmt_ip = ""
+    mgmt_port = ""
+    mgmt_user = "mininet"
+    mgmt_password = "mininet"
 
-    mgmt_prompt = '>'
+    mgmt_prompt = ">"
 
-    initialization_type = 'cleanup'
+    initialization_type = "cleanup"
 
     @property
     def connection_configs(self):
-        return ['pwd']
+        return ["pwd"]
 
     @property
     def cleanup_cmds(self):
-        return ['/sbin/ifconfig -a | egrep \'^s\' | awk \'{print \"sudo ovs-vsctl del-br\",$1}\' | sh']
+        return [
+            "/sbin/ifconfig -a | egrep '^s' | awk '{print \"sudo ovs-vsctl del-br\",$1}' | sh"
+        ]
 
     @property
     def initialization_cmds(self):
@@ -39,28 +41,30 @@ class Ovs(BaseSwitch):
 
     @property
     def base_openflow_config(self):
-        return ['sudo ovs-vsctl add-br s1',
-                'sudo ovs-vsctl set bridge s1 protocols=OpenFlow13',
-                'sudo ovs-vsctl set-controller s1 tcp:' + self.of_controller_ip,
-                'sudo ifconfig s1 up']
+        return [
+            "sudo ovs-vsctl add-br s1",
+            "sudo ovs-vsctl set bridge s1 protocols=OpenFlow13",
+            "sudo ovs-vsctl set-controller s1 tcp:" + self.of_controller_ip,
+            "sudo ifconfig s1 up",
+        ]
 
     @property
     def openflow_validation_cmd(self):
-        return 'sudo ovs-vsctl show'
+        return "sudo ovs-vsctl show"
 
     @property
     def openflow_enable_config(self):
-        return ['sudo ovs-vsctl set-controller s1 tcp:' + self.of_controller_ip]
+        return ["sudo ovs-vsctl set-controller s1 tcp:" + self.of_controller_ip]
 
     @property
     def openflow_enable_validations(self):
-        return ['is_connected: true']
+        return ["is_connected: true"]
 
-    invalid_of_controller_ip = '1.1.1.1'
+    invalid_of_controller_ip = "1.1.1.1"
 
     @property
     def openflow_disable_config(self):
-        return ['sudo ovs-vsctl set-controller s1 tcp:' + self.invalid_of_controller_ip]
+        return ["sudo ovs-vsctl set-controller s1 tcp:" + self.invalid_of_controller_ip]
 
     @property
     def openflow_disable_validations(self):
@@ -68,34 +72,43 @@ class Ovs(BaseSwitch):
 
     @property
     def dump_all_flows(self):
-        return 'sudo /usr/bin/ovs-ofctl dump-flows s1 -O OpenFlow13'
+        return "sudo /usr/bin/ovs-ofctl dump-flows s1 -O OpenFlow13"
 
     @property
     def flow_validations(self):
-        return ['dl_src=' + self.src_mac +
-                ',dl_dst=' + self.dst_mac +
-                ',nw_src=' + self.ip_src +
-                ',nw_dst=' + self.ip_dst +
-                ' actions=' + self.action,
-                'table=' + self.table_id]
+        return [
+            "dl_src="
+            + self.src_mac
+            + ",dl_dst="
+            + self.dst_mac
+            + ",nw_src="
+            + self.ip_src
+            + ",nw_dst="
+            + self.ip_dst
+            + " actions="
+            + self.action,
+            "table=" + self.table_id,
+        ]
 
     def create_flow_match_elements(self, flow_xml):
         super(Ovs, self).create_flow_match_elements(flow_xml)
-        if (self.action == 'INPORT'):
-            self.action = 'IN_PORT'
+        if self.action == "INPORT":
+            self.action = "IN_PORT"
 
     @property
     def datapath_id_output_command(self):
-        '''This regex will extract the macaddr of the ovs switch'''
-        return '/sbin/ifconfig s1 | grep -o -E "([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}"'
+        """This regex will extract the macaddr of the ovs switch"""
+        return (
+            '/sbin/ifconfig s1 | grep -o -E "([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}"'
+        )
 
-    datapath_id_output_string = ''
-    datapath_id = ''
+    datapath_id_output_string = ""
+    datapath_id = ""
 
     def update_datapath_id(self):
         if not self.datapath_id_output_string:
-            self.datapath_id = 'unknown'
+            self.datapath_id = "unknown"
         else:
             # 32:cc:bf:34:ed:4c
-            datapath_id_hex = re.sub(':', '', self.datapath_id_output_string)
+            datapath_id_hex = re.sub(":", "", self.datapath_id_output_string)
             self.datapath_id = self.convert_hex_to_decimal_as_string(datapath_id_hex)
index 56d6303ca2bf941c6c1cad3d885217c895fc5917..45055573c0e714b8cc17e326da69caced352bb9b 100644 (file)
@@ -8,98 +8,105 @@ from BaseSwitch import BaseSwitch
 
 
 class ProVision(BaseSwitch):
-    '''
+    """
     ProVision Super Class
-    '''
+    """
 
-    make = 'provision'
-    model = ''
+    make = "provision"
+    model = ""
 
-    mgmt_protocol = 'telnet'
-    mgmt_ip = ''
-    mgmt_port = ''
-    mgmt_prompt = model + '.*#'
+    mgmt_protocol = "telnet"
+    mgmt_ip = ""
+    mgmt_port = ""
+    mgmt_prompt = model + ".*#"
 
-    initialization_type = 'reboot'
+    initialization_type = "reboot"
 
-    of_instance_id = '21'
+    of_instance_id = "21"
 
     @property
     def connection_configs(self):
-        return ['\rend \
+        return [
+            "\rend \
                  \rconfig \
                  \rconsole local-terminal none \
                  \rno page \
-                 \rend\r']
+                 \rend\r"
+        ]
 
     @property
     def initialization_cmds(self):
-        return ['\rend\rboot system flash primary config odl_test_startup_config\r',
-                'y',
-                'n']
+        return [
+            "\rend\rboot system flash primary config odl_test_startup_config\r",
+            "y",
+            "n",
+        ]
 
     @property
     def cleanup_cmds(self):
-        return ['end',
-                'config',
-                'no openflow\r',
-                'y']
+        return ["end", "config", "no openflow\r", "y"]
 
     @property
     def base_openflow_config(self):
-        return 'end', \
-               'config', \
-               'openflow', \
-               'controller-id ' + self.of_instance_id + ' ip ' + self.of_controller_ip + \
-               ' controller-interface oobm', \
-               'instance ' + self.of_instance_id, \
-               'member vlan 10', \
-               'controller-id ' + self.of_instance_id + ' ', \
-               'version 1.3', \
-               'enable', \
-               'openflow enable', \
-               'end'
+        return (
+            "end",
+            "config",
+            "openflow",
+            "controller-id "
+            + self.of_instance_id
+            + " ip "
+            + self.of_controller_ip
+            + " controller-interface oobm",
+            "instance " + self.of_instance_id,
+            "member vlan 10",
+            "controller-id " + self.of_instance_id + " ",
+            "version 1.3",
+            "enable",
+            "openflow enable",
+            "end",
+        )
 
     @property
     def openflow_enable_config(self):
-        return ['end',
-                'config',
-                'openflow enable',
-                'end']
+        return ["end", "config", "openflow enable", "end"]
 
     @property
     def openflow_validation_cmd(self):
-        return \
-            'show openflow'
+        return "show openflow"
 
     @property
     def openflow_enable_validations(self):
-        return ['OpenFlow +: Enabled',
-                self.of_instance_id + ' +Up +2 +1 +1.3']
+        return ["OpenFlow +: Enabled", self.of_instance_id + " +Up +2 +1 +1.3"]
 
     @property
     def openflow_disable_config(self):
-        return ['end',
-                'config',
-                'openflow disable',
-                'end']
+        return ["end", "config", "openflow disable", "end"]
 
     @property
     def openflow_disable_validations(self):
-        return ['OpenFlow +: Disabled', self.of_instance_id + ' +Down +0 +0 +1.3']
+        return ["OpenFlow +: Disabled", self.of_instance_id + " +Down +0 +0 +1.3"]
 
     @property
     def dump_all_flows(self):
-        return 'show openflow instance ' + self.of_instance_id + ' flows'
+        return "show openflow instance " + self.of_instance_id + " flows"
 
     @property
     def flow_validations(self):
-        return ['(?ms)Flow Table ID : 0.*Flow Table ID : 100.*' +
-                'Source Protocol Address : ' + self.ip_src + '.*' +
-                'Target Protocol Address : ' + self.ip_dst + '.*' +
-                'Flow Table ID : ' + self.table_id + '.*' + self.action,
-                'Source MAC    : ' + self.src_mac,
-                'Destination MAC  : ' + self.dst_mac]
+        return [
+            "(?ms)Flow Table ID : 0.*Flow Table ID : 100.*"
+            + "Source Protocol Address : "
+            + self.ip_src
+            + ".*"
+            + "Target Protocol Address : "
+            + self.ip_dst
+            + ".*"
+            + "Flow Table ID : "
+            + self.table_id
+            + ".*"
+            + self.action,
+            "Source MAC    : " + self.src_mac,
+            "Destination MAC  : " + self.dst_mac,
+        ]
 
     def create_flow_match_elements(self, flow_xml):
         super(ProVision, self).create_flow_match_elements(flow_xml)
@@ -108,47 +115,46 @@ class ProVision(BaseSwitch):
         self.action = self.convert_action_to_provision_format(self.action)
 
     def format_mac_with_no_hyphens_and_one_colon(self, mac):
-        mac_no_colons = re.sub(':', '', mac)
-        mac_with_hyphen = mac_no_colons[:6] + '-' + mac_no_colons[6:]
+        mac_no_colons = re.sub(":", "", mac)
+        mac_with_hyphen = mac_no_colons[:6] + "-" + mac_no_colons[6:]
         return mac_with_hyphen
 
     def convert_action_to_provision_format(self, action):
-        if (action == 'INPORT'):
-            return 'Ingress Port'
-        if (action == 'TABLE'):
-            return 'Table'
-        if (action == 'NORMAL'):
-            return 'Normal'
-        if (action == 'FLOOD'):
-            return 'Flood'
-        if (action == 'ALL'):
-            return 'All'
-        if (action == 'CONTROLLER'):
-            return 'Controller Port'
-        if (action == 'LOCAL'):
-            return 'Local'
-        return 'UNKNOWN'
+        if action == "INPORT":
+            return "Ingress Port"
+        if action == "TABLE":
+            return "Table"
+        if action == "NORMAL":
+            return "Normal"
+        if action == "FLOOD":
+            return "Flood"
+        if action == "ALL":
+            return "All"
+        if action == "CONTROLLER":
+            return "Controller Port"
+        if action == "LOCAL":
+            return "Local"
+        return "UNKNOWN"
 
     @property
     def datapath_id_output_command(self):
-        return \
-            'show openflow instance ' + self.of_instance_id + ' | include Datapath'
+        return "show openflow instance " + self.of_instance_id + " | include Datapath"
 
-    connection_index = ''
+    connection_index = ""
 
     def set_connection_index(self, idx):
         self.connection_index = idx
 
-    datapath_id_output_string = ''
-    datapath_id = ''
+    datapath_id_output_string = ""
+    datapath_id = ""
 
     def update_datapath_id(self):
         if not self.datapath_id_output_string:
-            self.datapath_id = 'unknown'
+            self.datapath_id = "unknown"
         else:
             # Datapath ID              : 000af0921c22bac0
             # |-----------------(0)---------------------|
             # |-----------(1)----------| |------(2)-----|
-            matches = re.search('(.*: )(\w+)', self.datapath_id_output_string)
+            matches = re.search("(.*: )(\w+)", self.datapath_id_output_string)
             datapath_id_hex = matches.group(2)
             self.datapath_id = self.convert_hex_to_decimal_as_string(datapath_id_hex)
index 666ec5e0218f88f21500c2bc99cc76e63b01c777..0842c9e0163878841df7e1a0c899e958b28a040d 100644 (file)
@@ -7,8 +7,8 @@ from ProVision import ProVision
 
 
 class ProVision_3800(ProVision):
-    '''
+    """
     ProVision 3800
-    '''
+    """
 
-    model = '3800'
+    model = "3800"
index d0cab65006a97ceedd1a6428441113350f1d3f75..aa43e45051008fd0b59dc04cdec7ace4877ef7b4 100644 (file)
@@ -14,9 +14,9 @@ class SwitchManager(object):
         """
         Return all nodes.
         """
-        if isinstance(content, dict) and 'nodeProperties' in content:
+        if isinstance(content, dict) and "nodeProperties" in content:
             self.builtin.log("18")
-            return [e.get('node') for e in content['nodeProperties']]
+            return [e.get("node") for e in content["nodeProperties"]]
         else:
             self.builtin.log("21")
             return None
@@ -24,9 +24,9 @@ class SwitchManager(object):
     def extract_all_properties(self, content, property_type):
         if isinstance(content, dict) and property_type in content:
             self.builtin.log("26")
-            list1 = [e.get('properties') for e in content[property_type]]
+            list1 = [e.get("properties") for e in content[property_type]]
             self.builtin.log(list1)
-            return [e.get('properties') for e in content[property_type]]
+            return [e.get("properties") for e in content[property_type]]
         else:
             self.builtin.log("29")
             return None
@@ -36,13 +36,13 @@ class SwitchManager(object):
         return [e.get(property) for e in res]
 
     def extract_all_node_properties(self, content):
-        return self.extract_all_properties(content, 'nodeProperties')
+        return self.extract_all_properties(content, "nodeProperties")
 
     def extract_node_property_values(self, content, property):
-        return self.extract_property_value(content, property, 'nodeProperties')
+        return self.extract_property_value(content, property, "nodeProperties")
 
     def extract_all_nodeconnector_properties(self, content):
-        return self.extract_all_properties(content, 'nodeConnectorProperties')
+        return self.extract_all_properties(content, "nodeConnectorProperties")
 
     def extract_nodeconnector_property_values(self, content, property):
-        return self.extract_property_value(content, property, 'nodeConnectorProperties')
+        return self.extract_property_value(content, property, "nodeConnectorProperties")
index 4aaf3b094f86c25a85ec6356351daf94163d699a..36516b0a9cac404a7c01635a82b8e687e6a1f0ce 100644 (file)
@@ -12,12 +12,12 @@ def get_active_controller_from_json(resp, service):
     :type service: str
     :return: Index of controller
     """
-    entities = json.loads(resp)['entity-owners']['entity-type']
+    entities = json.loads(resp)["entity-owners"]["entity-type"]
     for entity in entities:
-        if entity['type'] == "org.opendaylight.mdsal.ServiceEntityType":
-            for instance in entity['entity']:
-                if service in instance['id']:
-                    return int(instance['owner'][-1:])
+        if entity["type"] == "org.opendaylight.mdsal.ServiceEntityType":
+            for instance in entity["entity"]:
+                if service in instance["id"]:
+                    return int(instance["owner"][-1:])
     return 0
 
 
@@ -52,11 +52,11 @@ def get_opposing_mode(mode):
         :returns: String with opposing SXP peer mode.
 
         """
-    if 'speaker' == mode:
-        return 'listener'
-    elif 'listener' == mode:
-        return 'speaker'
-    return 'both'
+    if "speaker" == mode:
+        return "listener"
+    elif "listener" == mode:
+        return "speaker"
+    return "both"
 
 
 def get_ip_from_number(n, base=2130706432):
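The default base 2130706432 is 0x7F000000, i.e. 127.0.0.0, so n is offset into the loopback range. A standalone check of that arithmetic (assuming the usual dotted-quad rendering):

    value = 2130706432 + 1  # base + n
    print(".".join(str((value >> s) & 0xFF) for s in (24, 16, 8, 0)))
    # 127.0.0.1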
@@ -105,7 +105,9 @@ def lower_version(ver1, ver2):
         return ver2
 
 
-def get_filter_entry(seq, entry_type, sgt="", esgt="", acl="", eacl="", pl="", epl="", ps=""):
+def get_filter_entry(
+    seq, entry_type, sgt="", esgt="", acl="", eacl="", pl="", epl="", ps=""
+):
     """Generate xml containing FilterEntry data
 
     :param seq: Sequence of entry
@@ -132,24 +134,24 @@ def get_filter_entry(seq, entry_type, sgt="", esgt="", acl="", eacl="", pl="", e
     entries = ""
     # Generate XML request containing combination of Matches of different types
     if sgt:
-        args = sgt.split(',')
+        args = sgt.split(",")
         entries += add_sgt_matches_xml(args)
     elif esgt:
-        args = esgt.split(',')
+        args = esgt.split(",")
         entries += add_sgt_range_xml(args[0], args[1])
     if pl:
         entries += add_pl_entry_xml(pl)
     elif epl:
-        args = epl.split(',')
+        args = epl.split(",")
         entries += add_epl_entry_xml(args[0], args[1], args[2])
     if acl:
-        args = acl.split(',')
+        args = acl.split(",")
         entries += add_acl_entry_xml(args[0], args[1])
     elif eacl:
-        args = eacl.split(',')
+        args = eacl.split(",")
         entries += add_eacl_entry_xml(args[0], args[1], args[2], args[3])
     if ps:
-        args = ps.split(',')
+        args = ps.split(",")
         entries += add_ps_entry_xml(args[0], args[1])
     # Wrap entries in ACL/PrefixList according to specified values
     if pl or epl:
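Hypothetical call shapes matching the split(",") parsing above, where multi-field arguments are packed comma-separated (the entry-type values here are placeholders):

    # two SGT matches in one entry:
    get_filter_entry("1", "permit-entry", sgt="10,20")
    # extended ACL: ip, wildcard-mask, address-mask, address-wildcard-mask:
    get_filter_entry("2", "deny-entry", eacl="10.0.0.0,0.0.0.255,255.255.255.0,0.0.0.31")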
@@ -167,13 +169,15 @@ def add_peers(*args):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <sxp-peer>
             <peer-address>$ip</peer-address>
-        </sxp-peer>''')
+        </sxp-peer>"""
+    )
     peers = ""
     for count, value in enumerate(args):
-        peers += templ.substitute({'ip': value})
+        peers += templ.substitute({"ip": value})
     return peers
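For instance:

    print(add_peers("10.0.0.1", "10.0.0.2"))
    # one <sxp-peer><peer-address>...</peer-address></sxp-peer> fragment per address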
 
 
@@ -185,13 +189,15 @@ def add_domains(*args):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <domain>
             <name>$name</name>
-        </domain>''')
+        </domain>"""
+    )
     peers = ""
     for count, value in enumerate(args):
-        peers += templ.substitute({'name': value})
+        peers += templ.substitute({"name": value})
     return peers
 
 
@@ -203,11 +209,13 @@ def add_sgt_matches_xml(sgt_entries):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
-        <matches>$sgt</matches>''')
+    templ = Template(
+        """
+        <matches>$sgt</matches>"""
+    )
     matches = ""
     for sgt in sgt_entries:
-        matches += templ.substitute({'sgt': sgt})
+        matches += templ.substitute({"sgt": sgt})
     return matches
 
 
@@ -221,10 +229,12 @@ def add_sgt_range_xml(start, end):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <sgt-start>$start</sgt-start>
-        <sgt-end>$end</sgt-end>''')
-    match = templ.substitute({'start': start, 'end': end})
+        <sgt-end>$end</sgt-end>"""
+    )
+    match = templ.substitute({"start": start, "end": end})
     return match
 
 
@@ -240,13 +250,16 @@ def add_acl_entry_default_xml(seq, entry_type, acl_entries):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <acl-entry>
             <entry-type>$entry_type</entry-type>
             <entry-seq>$seq</entry-seq>$acl_entries
-        </acl-entry>''')
+        </acl-entry>"""
+    )
     matches = templ.substitute(
-        {'seq': seq, 'entry_type': entry_type, 'acl_entries': acl_entries})
+        {"seq": seq, "entry_type": entry_type, "acl_entries": acl_entries}
+    )
     return matches
 
 
@@ -260,12 +273,14 @@ def add_acl_entry_xml(ip, mask):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <acl-match>
             <ip-address>$ip</ip-address>
             <wildcard-mask>$mask</wildcard-mask>
-        </acl-match>''')
-    return templ.substitute({'ip': ip, 'mask': mask})
+        </acl-match>"""
+    )
+    return templ.substitute({"ip": ip, "mask": mask})
 
 
 def add_eacl_entry_xml(ip, mask, amask, wmask):
@@ -282,7 +297,8 @@ def add_eacl_entry_xml(ip, mask, amask, wmask):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <acl-match>
             <ip-address>$ip</ip-address>
             <wildcard-mask>$mask</wildcard-mask>
@@ -290,8 +306,9 @@ def add_eacl_entry_xml(ip, mask, amask, wmask):
               <address-mask>$amask</address-mask>
               <wildcard-mask>$wmask</wildcard-mask>
             </mask>
-        </acl-match>''')
-    return templ.substitute({'ip': ip, 'mask': mask, 'amask': amask, 'wmask': wmask})
+        </acl-match>"""
+    )
+    return templ.substitute({"ip": ip, "mask": mask, "amask": amask, "wmask": wmask})
 
 
 def add_ps_entry_default_xml(seq, entry_type, ps_entries):
@@ -306,12 +323,16 @@ def add_ps_entry_default_xml(seq, entry_type, ps_entries):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
     <peer-sequence-entry xmlns="urn:opendaylight:sxp:controller">
           <entry-type>$entry_type</entry-type>
           <entry-seq>$seq</entry-seq>$ps_entries
-    </peer-sequence-entry>''')
-    return templ.substitute({'seq': seq, 'entry_type': entry_type, 'ps_entries': ps_entries})
+    </peer-sequence-entry>"""
+    )
+    return templ.substitute(
+        {"seq": seq, "entry_type": entry_type, "ps_entries": ps_entries}
+    )
 
 
 def add_pl_entry_default_xml(seq, entry_type, pl_entries):
@@ -326,12 +347,16 @@ def add_pl_entry_default_xml(seq, entry_type, pl_entries):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
     <prefix-list-entry xmlns="urn:opendaylight:sxp:controller">
           <entry-type>$entry_type</entry-type>
           <entry-seq>$seq</entry-seq>$pl_entries
-    </prefix-list-entry>''')
-    return templ.substitute({'seq': seq, 'entry_type': entry_type, 'pl_entries': pl_entries})
+    </prefix-list-entry>"""
+    )
+    return templ.substitute(
+        {"seq": seq, "entry_type": entry_type, "pl_entries": pl_entries}
+    )
 
 
 def add_pl_entry_xml(prefix):
@@ -342,11 +367,13 @@ def add_pl_entry_xml(prefix):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <prefix-list-match>
             <ip-prefix>$prefix</ip-prefix>
-        </prefix-list-match>''')
-    return templ.substitute({'prefix': prefix})
+        </prefix-list-match>"""
+    )
+    return templ.substitute({"prefix": prefix})
 
 
 def add_epl_entry_xml(prefix, op, mask):
@@ -361,15 +388,17 @@ def add_epl_entry_xml(prefix, op, mask):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <prefix-list-match>
             <ip-prefix>$prefix</ip-prefix>
             <mask>
                 <mask-range>$op</mask-range>
                 <mask-value>$mask</mask-value>
             </mask>
-        </prefix-list-match>''')
-    return templ.substitute({'prefix': prefix, 'mask': mask, 'op': op})
+        </prefix-list-match>"""
+    )
+    return templ.substitute({"prefix": prefix, "mask": mask, "op": op})
 
 
 def add_ps_entry_xml(op, length):
@@ -382,11 +411,13 @@ def add_ps_entry_xml(op, length):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
         <peer-sequence-length>$length</peer-sequence-length>
         <peer-sequence-range>$op</peer-sequence-range>
-        ''')
-    return templ.substitute({'length': length, 'op': op})
+        """
+    )
+    return templ.substitute({"length": length, "op": op})
 
 
 def parse_peer_groups(groups_json):
@@ -398,7 +429,7 @@ def parse_peer_groups(groups_json):
 
     """
     data = json.loads(groups_json)
-    groups = data['output']
+    groups = data["output"]
     output = []
     for group in groups.values():
         output += group
@@ -414,10 +445,10 @@ def parse_connections(connections_json):
 
     """
     data = json.loads(connections_json)
-    output = data['output']
+    output = data["output"]
     result = []
     if output:
-        connections = output['connections']
+        connections = output["connections"]
         for connection in connections.values():
             result += connection
     return result
@@ -442,11 +473,15 @@ def find_connection(connections_json, version, mode, ip, port, state):
 
     """
     for connection in parse_connections(connections_json):
-        if (connection['peer-address'] == ip and connection['tcp-port'] == int(port) and (
-                mode.strip() == 'any' or connection['mode'] == mode) and connection['version'] == version):
-            if state == 'none':
+        if (
+            connection["peer-address"] == ip
+            and connection["tcp-port"] == int(port)
+            and (mode.strip() == "any" or connection["mode"] == mode)
+            and connection["version"] == version
+        ):
+            if state == "none":
                 return True
-            elif connection['state'] == state:
+            elif connection["state"] == state:
                 return True
     return False
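A self-contained check built on the JSON shape parse_connections() expects (output -> connections -> lists of connection dicts); all values are hypothetical:

    import json

    connections_json = json.dumps(
        {
            "output": {
                "connections": {
                    "connection": [
                        {
                            "peer-address": "10.0.0.2",
                            "tcp-port": 64999,
                            "mode": "speaker",
                            "version": "version4",
                            "state": "on",
                        }
                    ]
                }
            }
        }
    )
    print(find_connection(connections_json, "version4", "speaker", "10.0.0.2", "64999", "on"))
    # True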
 
@@ -461,7 +496,7 @@ def parse_bindings(bindings_json):
     """
     data = json.loads(bindings_json)
     output = []
-    for bindings_json in data['output'].values():
+    for bindings_json in data["output"].values():
         for binding in bindings_json:
             output.append(binding)
     return output
@@ -480,8 +515,8 @@ def find_binding(bindings, sgt, prefix):
 
     """
     for binding in parse_bindings(bindings):
-        if binding['sgt'] == int(sgt):
-            for ip_prefix in binding['ip-prefix']:
+        if binding["sgt"] == int(sgt):
+            for ip_prefix in binding["ip-prefix"]:
                 if ip_prefix == prefix:
                     return True
     return False
@@ -498,12 +533,12 @@ def parse_prefix_groups(prefix_groups_json, source_):
 
     """
     data = json.loads(prefix_groups_json)
-    bindings = data['sxp-node:master-database']
+    bindings = data["sxp-node:master-database"]
     output = []
     for binding in bindings.values():
         for binding_source in binding:
-            if source_ == "any" or binding_source['binding-source'] == source_:
-                for prefix_group in binding_source['prefix-group']:
+            if source_ == "any" or binding_source["binding-source"] == source_:
+                for prefix_group in binding_source["prefix-group"]:
                     output.append(prefix_group)
     return output
 
@@ -526,14 +561,24 @@ def find_binding_legacy(prefix_groups_json, sgt, prefix, source_, action):
     """
     found = False
     for prefixgroup in parse_prefix_groups(prefix_groups_json, source_):
-        if prefixgroup['sgt'] == int(sgt):
-            for binding in prefixgroup['binding']:
-                if binding['ip-prefix'] == prefix and binding['action'] == action:
+        if prefixgroup["sgt"] == int(sgt):
+            for binding in prefixgroup["binding"]:
+                if binding["ip-prefix"] == prefix and binding["action"] == action:
                     found = True
     return found
 
 
-def add_connection_xml(version, mode, ip, port, node, password_, domain_name, bindings_timeout=0, security_mode=''):
+def add_connection_xml(
+    version,
+    mode,
+    ip,
+    port,
+    node,
+    password_,
+    domain_name,
+    bindings_timeout=0,
+    security_mode="",
+):
     """Generate xml for Add Connection request
 
     :param version: Version of SXP protocol (version1/2/3/4)
@@ -557,7 +602,8 @@ def add_connection_xml(version, mode, ip, port, node, password_, domain_name, bi
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
    <requested-node xmlns="urn:opendaylight:sxp:controller">$node</requested-node>
    $domain
    <connections xmlns="urn:opendaylight:sxp:controller">
@@ -581,11 +627,23 @@ def add_connection_xml(version, mode, ip, port, node, password_, domain_name, bi
       </connection>
    </connections>
 </input>
-''')
+"""
+    )
     data = templ.substitute(
-        {'ip': ip, 'port': port, 'mode': mode, 'version': version, 'node': node,
-         'password_': password_, 'domain': get_domain_name(domain_name), 'timeout': bindings_timeout,
-         'security_type': '<security-type>' + security_mode + '</security-type>' if security_mode else ''})
+        {
+            "ip": ip,
+            "port": port,
+            "mode": mode,
+            "version": version,
+            "node": node,
+            "password_": password_,
+            "domain": get_domain_name(domain_name),
+            "timeout": bindings_timeout,
+            "security_type": "<security-type>" + security_mode + "</security-type>"
+            if security_mode
+            else "",
+        }
+    )
     return data
 
 
@@ -603,13 +661,22 @@ def delete_connections_xml(address, port, node, domain_name):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
    <requested-node xmlns="urn:opendaylight:sxp:controller">$node</requested-node>
    $domain
    <peer-address xmlns="urn:opendaylight:sxp:controller">$address</peer-address>
    <tcp-port xmlns="urn:opendaylight:sxp:controller">$port</tcp-port>
-</input>''')
-    data = templ.substitute({'address': address, 'port': port, 'node': node, 'domain': get_domain_name(domain_name)})
+</input>"""
+    )
+    data = templ.substitute(
+        {
+            "address": address,
+            "port": port,
+            "node": node,
+            "domain": get_domain_name(domain_name),
+        }
+    )
     return data
 
 
@@ -625,14 +692,16 @@ def add_peer_group_xml(name, peers, ip):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <sxp-peer-group xmlns="urn:opendaylight:sxp:controller">
     <name xmlns="urn:opendaylight:sxp:controller">$name</name>
     <sxp-peers xmlns="urn:opendaylight:sxp:controller">$peers</sxp-peers>
     </sxp-peer-group>
-</input>''')
-    data = templ.substitute({'name': name, 'peers': peers, 'ip': ip})
+</input>"""
+    )
+    data = templ.substitute({"name": name, "peers": peers, "ip": ip})
     return data
 
 
@@ -646,11 +715,13 @@ def delete_peer_group_xml(name, ip):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <peer-group-name xmlns="urn:opendaylight:sxp:controller">$name</peer-group-name>
-</input>''')
-    data = templ.substitute({'name': name, 'ip': ip})
+</input>"""
+    )
+    data = templ.substitute({"name": name, "ip": ip})
     return data
 
 
@@ -662,10 +733,12 @@ def get_peer_groups_from_node_xml(ip):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
    <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
-</input>''')
-    data = templ.substitute({'ip': ip})
+</input>"""
+    )
+    data = templ.substitute({"ip": ip})
     return data
 
 
@@ -689,16 +762,25 @@ def add_filter_xml(group, filter_type, entries, ip, policy=None):
         policy = "<filter-policy>" + policy + "</filter-policy>"
     else:
         policy = ""
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <peer-group-name xmlns="urn:opendaylight:sxp:controller">$group</peer-group-name>
   <sxp-filter xmlns="urn:opendaylight:sxp:controller">
     $filter_policy
     <filter-type>$filter_type</filter-type>$entries
   </sxp-filter>
-</input>''')
+</input>"""
+    )
     data = templ.substitute(
-        {'group': group, 'filter_type': filter_type, 'ip': ip, 'entries': entries, 'filter_policy': policy})
+        {
+            "group": group,
+            "filter_type": filter_type,
+            "ip": ip,
+            "entries": entries,
+            "filter_policy": policy,
+        }
+    )
     return data
 
 
@@ -720,7 +802,8 @@ def add_domain_filter_xml(domain, domains, entries, ip, filter_name=None):
     """
     if filter_name:
         filter_name = "<filter-name>" + filter_name + "</filter-name>"
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <domain-name xmlns="urn:opendaylight:sxp:controller">$domain</domain-name>
   <sxp-domain-filter xmlns="urn:opendaylight:sxp:controller">
@@ -728,9 +811,17 @@ def add_domain_filter_xml(domain, domains, entries, ip, filter_name=None):
     <domains>$domains</domains>
     $entries
   </sxp-domain-filter>
-</input>''')
+</input>"""
+    )
     data = templ.substitute(
-        {'domain': domain, 'domains': domains, 'ip': ip, 'entries': entries, 'filter_name': filter_name})
+        {
+            "domain": domain,
+            "domains": domains,
+            "ip": ip,
+            "entries": entries,
+            "filter_name": filter_name,
+        }
+    )
     return data
 
 
@@ -746,13 +837,14 @@ def delete_filter_xml(group, filter_type, ip):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <peer-group-name xmlns="urn:opendaylight:sxp:controller">$group</peer-group-name>
   <filter-type xmlns="urn:opendaylight:sxp:controller">$filter_type</filter-type>
-</input>''')
-    data = templ.substitute(
-        {'group': group, 'filter_type': filter_type, 'ip': ip})
+</input>"""
+    )
+    data = templ.substitute({"group": group, "filter_type": filter_type, "ip": ip})
     return data
 
 
@@ -769,14 +861,19 @@ def delete_domain_filter_xml(domain, ip, filter_name=None):
 
     """
     if filter_name:
-        filter_name = '<filter-name xmlns="urn:opendaylight:sxp:controller">' + filter_name + "</filter-name>"
-    templ = Template('''<input>
+        filter_name = (
+            '<filter-name xmlns="urn:opendaylight:sxp:controller">'
+            + filter_name
+            + "</filter-name>"
+        )
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <domain-name xmlns="urn:opendaylight:sxp:controller">$domain</domain-name>
   $filter_name
-</input>''')
-    data = templ.substitute(
-        {'domain': domain, 'ip': ip, 'filter_name': filter_name})
+</input>"""
+    )
+    data = templ.substitute({"domain": domain, "ip": ip, "filter_name": filter_name})
     return data
 
 
@@ -790,11 +887,13 @@ def get_connections_from_node_xml(ip, domain_name):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
    <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
    $domain
-</input>''')
-    data = templ.substitute({'ip': ip, 'domain': get_domain_name(domain_name)})
+</input>"""
+    )
+    data = templ.substitute({"ip": ip, "domain": get_domain_name(domain_name)})
     return data
 
 
@@ -810,17 +909,30 @@ def get_bindings_from_node_xml(ip, binding_range, domain_name):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input>
+    templ = Template(
+        """<input>
   <requested-node xmlns="urn:opendaylight:sxp:controller">$ip</requested-node>
   <bindings-range xmlns="urn:opendaylight:sxp:controller">$range</bindings-range>
   $domain
-</input>''')
-    data = templ.substitute({'ip': ip, 'range': binding_range, 'domain': get_domain_name(domain_name)})
+</input>"""
+    )
+    data = templ.substitute(
+        {"ip": ip, "range": binding_range, "domain": get_domain_name(domain_name)}
+    )
     return data
 
 
-def add_node_xml(node_id, port, password, version, node_ip=None, expansion=0, bindings_timeout=0, keystores=None,
-                 retry_open_timer=1):
+def add_node_xml(
+    node_id,
+    port,
+    password,
+    version,
+    node_ip=None,
+    expansion=0,
+    bindings_timeout=0,
+    keystores=None,
+    retry_open_timer=1,
+):
     """Generate xml for Add Node request
 
     :param node_id: Ipv4 address formatted node id
@@ -842,9 +954,10 @@ def add_node_xml(node_id, port, password, version, node_ip=None, expansion=0, bi
     :returns: String containing xml data for request
 
     """
-    tls = ''
+    tls = ""
     if keystores:
-        tls = Template('''
+        tls = Template(
+            """
         <tls>
             <keystore>
               <location>$keystore</location>
@@ -860,10 +973,17 @@ def add_node_xml(node_id, port, password, version, node_ip=None, expansion=0, bi
             </truststore>
             <certificate-password>$passwd</certificate-password>
         </tls>
-    ''').substitute(
-            {'keystore': keystores['keystore'], 'truststore': keystores['truststore'], 'passwd': keystores['password']})
-
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    """
+        ).substitute(
+            {
+                "keystore": keystores["keystore"],
+                "truststore": keystores["truststore"],
+                "passwd": keystores["password"],
+            }
+        )
+
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <timers>
         <retry-open-time>$retry_open_timer</retry-open-time>
@@ -884,11 +1004,21 @@ def add_node_xml(node_id, port, password, version, node_ip=None, expansion=0, bi
     <version>$version</version>
     <description>ODL SXP Controller</description>
     <source-ip>$ip</source-ip>
-</input>''')
+</input>"""
+    )
     data = templ.substitute(
-        {'ip': node_ip or node_id, 'id': node_id, 'port': port, 'password': password,
-         'version': version, 'expansion': expansion, 'timeout': bindings_timeout, 'tls': tls,
-         'retry_open_timer': retry_open_timer})
+        {
+            "ip": node_ip or node_id,
+            "id": node_id,
+            "port": port,
+            "password": password,
+            "version": version,
+            "expansion": expansion,
+            "timeout": bindings_timeout,
+            "tls": tls,
+            "retry_open_timer": retry_open_timer,
+        }
+    )
     return data
 
 
@@ -900,10 +1030,12 @@ def delete_node_xml(node_id):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
   <node-id>$id</node-id>
-</input>''')
-    data = templ.substitute({'id': node_id})
+</input>"""
+    )
+    data = templ.substitute({"id": node_id})
     return data
 
 
@@ -923,28 +1055,39 @@ def add_domain_xml_fluorine(node_id, name, sgt, prefixes, origin):
     :returns: String containing xml data for request
 
     """
-    master_database = ''
-    if prefixes != 'None':
-        xml_prefixes = ''
-        for prefix in prefixes.split(','):
-            xml_prefixes += '\n' + '<ip-prefix>' + prefix + '</ip-prefix>'
+    master_database = ""
+    if prefixes != "None":
+        xml_prefixes = ""
+        for prefix in prefixes.split(","):
+            xml_prefixes += "\n" + "<ip-prefix>" + prefix + "</ip-prefix>"
         if xml_prefixes:
-            master_database += '''<master-database>
+            master_database += """<master-database>
             <binding>
                 <sgt>$sgt</sgt>
                 $xml_prefixes
             </binding>
-        </master-database>'''
-            master_database = Template(master_database).substitute(({'sgt': sgt, 'xml_prefixes': xml_prefixes}))
+        </master-database>"""
+            master_database = Template(master_database).substitute(
+                ({"sgt": sgt, "xml_prefixes": xml_prefixes})
+            )
 
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <domain-name>$name</domain-name>
     <origin>$origin</origin>
     $master_database
-</input>''')
+</input>"""
+    )
 
-    data = templ.substitute({'name': name, 'id': node_id, 'origin': origin, 'master_database': master_database})
+    data = templ.substitute(
+        {
+            "name": name,
+            "id": node_id,
+            "origin": origin,
+            "master_database": master_database,
+        }
+    )
     return data
 
 
@@ -962,27 +1105,33 @@ def add_domain_xml_oxygen(node_id, name, sgt, prefixes):
     :returns: String containing xml data for request
 
     """
-    master_database = ''
-    if prefixes != 'None':
-        xml_prefixes = ''
-        for prefix in prefixes.split(','):
-            xml_prefixes += '\n' + '<ip-prefix>' + prefix + '</ip-prefix>'
+    master_database = ""
+    if prefixes != "None":
+        xml_prefixes = ""
+        for prefix in prefixes.split(","):
+            xml_prefixes += "\n" + "<ip-prefix>" + prefix + "</ip-prefix>"
         if xml_prefixes:
-            master_database += '''<master-database>
+            master_database += """<master-database>
             <binding>
                 <sgt>$sgt</sgt>
                 $xml_prefixes
             </binding>
-        </master-database>'''
-            master_database = Template(master_database).substitute(({'sgt': sgt, 'xml_prefixes': xml_prefixes}))
+        </master-database>"""
+            master_database = Template(master_database).substitute(
+                ({"sgt": sgt, "xml_prefixes": xml_prefixes})
+            )
 
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <domain-name>$name</domain-name>
     $master_database
-</input>''')
+</input>"""
+    )
 
-    data = templ.substitute({'name': name, 'id': node_id, 'master_database': master_database})
+    data = templ.substitute(
+        {"name": name, "id": node_id, "master_database": master_database}
+    )
     return data
 
 
@@ -996,12 +1145,14 @@ def delete_domain_xml(node_id, name):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$node_id</node-id>
     <domain-name>$name</domain-name>
-</input>''')
+</input>"""
+    )
 
-    data = templ.substitute({'node_id': node_id, 'name': name})
+    data = templ.substitute({"node_id": node_id, "name": name})
     return data
 
 
@@ -1013,10 +1164,14 @@ def get_domain_name(domain_name):
     :returns: String containing xml data for request
 
     """
-    if domain_name == 'global':
-        return ''
+    if domain_name == "global":
+        return ""
     else:
-        return '<domain-name xmlns="urn:opendaylight:sxp:controller">' + domain_name + '</domain-name>'
+        return (
+            '<domain-name xmlns="urn:opendaylight:sxp:controller">'
+            + domain_name
+            + "</domain-name>"
+        )
 
 
 def add_bindings_xml_fluorine(node_id, domain, sgt, prefixes, origin):
@@ -1035,10 +1190,11 @@ def add_bindings_xml_fluorine(node_id, domain, sgt, prefixes, origin):
     :returns: String containing xml data for request
 
     """
-    xml_prefixes = ''
-    for prefix in prefixes.split(','):
-        xml_prefixes += '\n' + '<ip-prefix>' + prefix + '</ip-prefix>'
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    xml_prefixes = ""
+    for prefix in prefixes.split(","):
+        xml_prefixes += "\n" + "<ip-prefix>" + prefix + "</ip-prefix>"
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <domain-name>$name</domain-name>
     <origin>$origin</origin>
@@ -1048,8 +1204,17 @@ def add_bindings_xml_fluorine(node_id, domain, sgt, prefixes, origin):
             $xml_prefixes
         </binding>
     </master-database>
-</input>''')
-    data = templ.substitute({'name': domain, 'id': node_id, 'sgt': sgt, 'xml_prefixes': xml_prefixes, 'origin': origin})
+</input>"""
+    )
+    data = templ.substitute(
+        {
+            "name": domain,
+            "id": node_id,
+            "sgt": sgt,
+            "xml_prefixes": xml_prefixes,
+            "origin": origin,
+        }
+    )
     return data
 
 
@@ -1067,18 +1232,22 @@ def add_bindings_xml_oxygen(node_id, domain, sgt, prefixes):
     :returns: String containing xml data for request
 
     """
-    xml_prefixes = ''
-    for prefix in prefixes.split(','):
-        xml_prefixes += '\n' + '<ip-prefix>' + prefix + '</ip-prefix>'
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    xml_prefixes = ""
+    for prefix in prefixes.split(","):
+        xml_prefixes += "\n" + "<ip-prefix>" + prefix + "</ip-prefix>"
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <domain-name>$name</domain-name>
         <binding>
             <sgt>$sgt</sgt>
             $xml_prefixes
         </binding>
-</input>''')
-    data = templ.substitute({'name': domain, 'id': node_id, 'sgt': sgt, 'xml_prefixes': xml_prefixes})
+</input>"""
+    )
+    data = templ.substitute(
+        {"name": domain, "id": node_id, "sgt": sgt, "xml_prefixes": xml_prefixes}
+    )
     return data
 
 
@@ -1096,18 +1265,22 @@ def delete_bindings_xml(node_id, domain, sgt, prefixes):
     :returns: String containing xml data for request
 
     """
-    xml_prefixes = ''
-    for prefix in prefixes.split(','):
-        xml_prefixes += '\n' + '<ip-prefix>' + prefix + '</ip-prefix>'
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:controller">
+    xml_prefixes = ""
+    for prefix in prefixes.split(","):
+        xml_prefixes += "\n" + "<ip-prefix>" + prefix + "</ip-prefix>"
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:controller">
     <node-id>$id</node-id>
     <domain-name>$name</domain-name>
     <binding>
         <sgt>$sgt</sgt>
         $xml_prefixes
     </binding>
-</input>''')
-    data = templ.substitute({'name': domain, 'id': node_id, 'sgt': sgt, 'xml_prefixes': xml_prefixes})
+</input>"""
+    )
+    data = templ.substitute(
+        {"name": domain, "id": node_id, "sgt": sgt, "xml_prefixes": xml_prefixes}
+    )
     return data
 
 
@@ -1124,12 +1297,12 @@ def prefix_range(start, end):
     start = int(start)
     end = int(end)
     index = 0
-    prefixes = ''
+    prefixes = ""
     while index < end:
-        prefixes += get_ip_from_number(index + start) + '/32'
+        prefixes += get_ip_from_number(index + start) + "/32"
         index += 1
         if index < end:
-            prefixes += ','
+            prefixes += ","
     return prefixes
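
Note that end here is the number of /32 prefixes generated, not an end address. For illustration, assuming a hypothetical get_ip_from_number that maps an integer to dotted-quad form (the real helper lives elsewhere in this library):

def get_ip_from_number(n):  # hypothetical stand-in, not the library's helper
    return "%d.%d.%d.%d" % ((n >> 24) & 255, (n >> 16) & 255, (n >> 8) & 255, n & 255)

print(get_ip_from_number(16843009))  # 1.1.1.1
# With that helper, prefix_range(16843009, 2) would return "1.1.1.1/32,1.1.1.2/32"
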
 
 
@@ -1145,14 +1318,18 @@ def route_definition_xml(virtual_ip, net_mask, interface):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''
+    templ = Template(
+        """
     <routing-definition>
         <ip-address>$vip</ip-address>
         <interface>$interface</interface>
         <netmask>$mask</netmask>
     </routing-definition>
-    ''')
-    data = templ.substitute({'mask': net_mask, 'vip': virtual_ip, 'interface': interface})
+    """
+    )
+    data = templ.substitute(
+        {"mask": net_mask, "vip": virtual_ip, "interface": interface}
+    )
     return data
 
 
@@ -1167,13 +1344,17 @@ def route_definitions_xml(routes, old_routes=None):
 
     """
     if old_routes and "</sxp-cluster-route>" in old_routes:
-        templ = Template(old_routes.replace("</sxp-cluster-route>", "$routes</sxp-cluster-route>"))
+        templ = Template(
+            old_routes.replace("</sxp-cluster-route>", "$routes</sxp-cluster-route>")
+        )
     else:
-        templ = Template('''<sxp-cluster-route xmlns="urn:opendaylight:sxp:cluster:route">
+        templ = Template(
+            """<sxp-cluster-route xmlns="urn:opendaylight:sxp:cluster:route">
     $routes
 </sxp-cluster-route>
-    ''')
-    data = templ.substitute({'routes': routes})
+    """
+        )
+    data = templ.substitute({"routes": routes})
     return data
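
A minimal sketch of the merge branch above, assuming old_routes already holds a complete <sxp-cluster-route> document: the new routes are spliced in just before the closing tag.

from string import Template

old_routes = (
    '<sxp-cluster-route xmlns="urn:opendaylight:sxp:cluster:route">'
    "<routing-definition>r1</routing-definition>"
    "</sxp-cluster-route>"
)
templ = Template(
    old_routes.replace("</sxp-cluster-route>", "$routes</sxp-cluster-route>")
)
print(templ.substitute({"routes": "<routing-definition>r2</routing-definition>"}))
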
 
 
@@ -1187,11 +1368,13 @@ def add_binding_origin_xml(origin, priority):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:config:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:config:controller">
     <origin>$origin</origin>
     <priority>$priority</priority>
-</input>''')
-    data = templ.substitute({'origin': origin, 'priority': priority})
+</input>"""
+    )
+    data = templ.substitute({"origin": origin, "priority": priority})
     return data
 
 
@@ -1205,11 +1388,13 @@ def update_binding_origin_xml(origin, priority):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:config:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:config:controller">
     <origin>$origin</origin>
     <priority>$priority</priority>
-</input>''')
-    data = templ.substitute({'origin': origin, 'priority': priority})
+</input>"""
+    )
+    data = templ.substitute({"origin": origin, "priority": priority})
     return data
 
 
@@ -1221,10 +1406,12 @@ def delete_binding_origin_xml(origin):
     :returns: String containing xml data for request
 
     """
-    templ = Template('''<input xmlns="urn:opendaylight:sxp:config:controller">
+    templ = Template(
+        """<input xmlns="urn:opendaylight:sxp:config:controller">
     <origin>$origin</origin>
-</input>''')
-    data = templ.substitute({'origin': origin})
+</input>"""
+    )
+    data = templ.substitute({"origin": origin})
     return data
 
 
@@ -1239,7 +1426,7 @@ def find_binding_origin(origins_json, origin):
 
     """
     for json_origin in parse_binding_origins(origins_json):
-        if json_origin['origin'] == origin:
+        if json_origin["origin"] == origin:
             return True
     return False
 
@@ -1257,8 +1444,8 @@ def find_binding_origin_with_priority(origins_json, origin, priority):
 
     """
     for json_origin in parse_binding_origins(origins_json):
-        if json_origin['origin'] == origin:
-            if json_origin['priority'] == int(priority):
+        if json_origin["origin"] == origin:
+            if json_origin["priority"] == int(priority):
                 return True
     return False
 
@@ -1272,7 +1459,7 @@ def parse_binding_origins(origins_json):
 
     """
     output = []
-    for origins in origins_json['binding-origins'].values():
+    for origins in origins_json["binding-origins"].values():
         for origin in origins:
             output.append(origin)
     return output
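
A hedged sketch of the JSON these three helpers consume; the inner key name under "binding-origins" is an assumption (the .values() walk above never looks at it):

origins_json = {
    "binding-origins": {
        "binding-origin": [  # inner key name is hypothetical
            {"origin": "LOCAL", "priority": 1},
            {"origin": "NETWORK", "priority": 2},
        ]
    }
}
# parse_binding_origins flattens the inner lists, so:
#   find_binding_origin(origins_json, "LOCAL")                      -> True
#   find_binding_origin_with_priority(origins_json, "NETWORK", "2") -> True
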
index b96e48e364235f5120c1ca66738db92bf6d3f859..ce26581decc45889ad802f1a7126e442e06c4499 100644 (file)
@@ -7,24 +7,27 @@ from robot.libraries.BuiltIn import BuiltIn
 
 
 class Topology(object):
-    '''
+    """
     Topology class provides the topology database and many methods to get properties of the topology.
-    '''
+    """
+
     topo_nodes_db = [
         [],
-        [{u'type': u'OF', u'id': u'00:00:00:00:00:00:00:01'}],
-        [{u'type': u'OF', u'id': u'00:00:00:00:00:00:00:01'},
-         {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:02'},
-         {u'type': u'OF', u'id': u'00:00:00:00:00:00:00:03'}]
+        [{u"type": u"OF", u"id": u"00:00:00:00:00:00:00:01"}],
+        [
+            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:01"},
+            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:02"},
+            {u"type": u"OF", u"id": u"00:00:00:00:00:00:00:03"},
+        ],
     ]
 
     def __init__(self):
         self.builtin = BuiltIn()
 
     def get_nodes_from_topology(self, topo_level):
-        '''
+        """
         get nodes from topology database by topology tree level
-        '''
+        """
         if isinstance(topo_level, str) or isinstance(topo_level, unicode):
             if topo_level.isdigit():
                 topo_level = int(topo_level)
@@ -41,7 +44,7 @@ class Topology(object):
             return None
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     topology = Topology()
     print(topology.get_nodes_from_topology(2))
-    print(topology.get_nodes_from_topology('2'))
+    print(topology.get_nodes_from_topology("2"))
index a62c429023550146daa2eed6a76b5f72955dd0ad..f8c984112dd1fe2748344ca39dca5bc2d35e82e7 100644 (file)
@@ -9,26 +9,29 @@ import Common
 
 
 class Topologynew(object):
-    '''
+    """
     Topology class provides the topology database and many methods to get properties of the topology.
 
     node_boilerplate = {u'type': u'MD_SAL', u'id': u'openflow:%d'}
-    '''
+    """
+
     topo_nodes_db = [
         [],
-        [{u'type': u'MD_SAL', u'id': u'openflow:1'}],
-        [{u'type': u'MD_SAL', u'id': u'openflow:1'},
-         {u'type': u'MD_SAL', u'id': u'openflow:2'},
-         {u'type': u'MD_SAL', u'id': u'openflow:3'}]
+        [{u"type": u"MD_SAL", u"id": u"openflow:1"}],
+        [
+            {u"type": u"MD_SAL", u"id": u"openflow:1"},
+            {u"type": u"MD_SAL", u"id": u"openflow:2"},
+            {u"type": u"MD_SAL", u"id": u"openflow:3"},
+        ],
     ]
 
     def __init__(self):
         self.builtin = BuiltIn()
 
     def get_nodes_from_topology(self, topo_level):
-        '''
+        """
         get nodes from topology database by topology tree level
-        '''
+        """
         if isinstance(topo_level, str) or isinstance(topo_level, unicode):
             if topo_level.isdigit():
                 topo_level = int(topo_level)
@@ -45,14 +48,14 @@ class Topologynew(object):
             return None
 
     def get_nodes_from_tree_topo(self, topo, exceptroot="0"):
-        '''
+        """
         This function generates a list of dictionaries containing the type and id of each node.
         It follows tree level topology.
         @parameter topo: either an integer (in this case, depth is set and fanout will be 2)
                          or a string in the format "(a,b)" (a and b are integers standing
                          for depth and fanout respectively)
         @return array of dictionary objects that contain info about each node
-        '''
+        """
         depth = 0
         fanout = 2
         if isinstance(topo, str) or isinstance(topo, unicode):
@@ -63,43 +66,47 @@ class Topologynew(object):
                 depth = t[0]
                 fanout = t[1]
             else:
-                return None                 # topology consists of two parameters: depth and fanout
+                return None  # topology consists of two parameters: depth and fanout
         elif isinstance(topo, int):
             depth = topo
         else:
-            return None                     # topo parameter is not given in a desired way
+            return None  # topo parameter is not given in a desired way
 
         num_nodes = Common.num_of_nodes(depth, fanout)
         nodelist = []
         for i in xrange(1, num_nodes + 1):
-            temp = {"id": "00:00:00:00:00:00:00:%s" % format(i, '02x'), "type": "OF"}
+            temp = {"id": "00:00:00:00:00:00:00:%s" % format(i, "02x"), "type": "OF"}
             nodelist.append(temp)
         if int(exceptroot):
             del nodelist[0]
         return nodelist
 
     def get_ids_of_leaf_nodes(self, fanout, depth):
-        '''
+        """
         For a tree structure, it enumerates leaf nodes
         following a depth-first strategy
         @parameter  fanout: fanout of tree
         @parameter  depth:  total depth of a tree
         @return     leafnodes:  list of ids of leaf nodes
-        '''
+        """
         leafnodes = []
         self._enumerate_nodes(0, 1, 1, fanout, depth - 1, leafnodes)
         return leafnodes
 
-    def _enumerate_nodes(self, currentdepth, nodeid, currentbranch, fanout, depth, leafnodes):
+    def _enumerate_nodes(
+        self, currentdepth, nodeid, currentbranch, fanout, depth, leafnodes
+    ):
         if currentdepth == depth:
-            leafnodes.append("00:00:00:00:00:00:00:%s" % format(nodeid, '02x'))
+            leafnodes.append("00:00:00:00:00:00:00:%s" % format(nodeid, "02x"))
             return 1
         nodes = 1
         for i in xrange(1, fanout + 1):
-            nodes += self._enumerate_nodes(currentdepth + 1, nodeid + nodes, i, fanout, depth, leafnodes)
+            nodes += self._enumerate_nodes(
+                currentdepth + 1, nodeid + nodes, i, fanout, depth, leafnodes
+            )
         return nodes
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     topologynew = Topologynew()
-    print(topologynew.get_nodes_from_tree_topo('(2,3)'))
+    print(topologynew.get_nodes_from_tree_topo("(2,3)"))
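
A worked check of the tree arithmetic, assuming Common.num_of_nodes sums fanout**level over the depth levels: depth 2 with fanout 3 gives 1 + 3 = 4 switches, so the __main__ call above returns four dicts with DPIDs 01 through 04 (the root is only dropped when exceptroot is truthy).

def num_of_nodes(depth, fanout):  # assumed shape of Common.num_of_nodes
    return sum(fanout ** level for level in range(depth))

print(num_of_nodes(2, 3))  # 4
print(["00:00:00:00:00:00:00:%s" % format(i, "02x") for i in range(1, 5)])
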
index c342386f0aa85407410cc7d12333d15156928a58..11eb79733a361631a01bbb7e2428e3120ea2df60 100644 (file)
@@ -17,14 +17,14 @@ __email__ = "syedbahm@cisco.com"
 global _cache
 
 
-def get(url, userId='admin', password='admin'):
+def get(url, userId="admin", password="admin"):
     """Helps in making GET REST calls"""
     warnings.warn(
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
-        DeprecationWarning
+        DeprecationWarning,
     )
     headers = {}
-    headers['Accept'] = 'application/xml'
+    headers["Accept"] = "application/xml"
 
     # Send the GET request
     session = _cache.switch("CLUSTERING_GET")
@@ -38,21 +38,23 @@ def nonprintpost(url, userId, password, data):
     """Helps in making POST REST calls without outputs"""
     warnings.warn(
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
-        DeprecationWarning
+        DeprecationWarning,
     )
 
     if userId is None:
-        userId = 'admin'
+        userId = "admin"
 
     if password is None:
-        password = 'admin'
+        password = "admin"
 
     headers = {}
-    headers['Content-Type'] = 'application/json'
+    headers["Content-Type"] = "application/json"
     # headers['Accept']= 'application/xml'
 
     session = _cache.switch("CLUSTERING_POST")
-    resp = session.post(url, data.encode('utf-8'), headers=headers, auth=(userId, password))
+    resp = session.post(
+        url, data.encode("utf-8"), headers=headers, auth=(userId, password)
+    )
 
     return resp
 
@@ -61,22 +63,24 @@ def post(url, userId, password, data):
     """Helps in making POST REST calls"""
     warnings.warn(
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
-        DeprecationWarning
+        DeprecationWarning,
     )
 
     if userId is None:
-        userId = 'admin'
+        userId = "admin"
 
     if password is None:
-        password = 'admin'
+        password = "admin"
 
     print("post request with url " + url)
     print("post request with data " + data)
     headers = {}
-    headers['Content-Type'] = 'application/json'
+    headers["Content-Type"] = "application/json"
     # headers['Accept'] = 'application/xml'
     session = _cache.switch("CLUSTERING_POST")
-    resp = session.post(url, data.encode('utf-8'), headers=headers, auth=(userId, password))
+    resp = session.post(
+        url, data.encode("utf-8"), headers=headers, auth=(userId, password)
+    )
 
     # print(resp.raise_for_status())
     print(resp.headers)
@@ -86,11 +90,11 @@ def post(url, userId, password, data):
     return resp
 
 
-def delete(url, userId='admin', password='admin'):
+def delete(url, userId="admin", password="admin"):
     """Helps in making DELETE REST calls"""
     warnings.warn(
         "Use the Robot RequestsLibrary rather than this. See DatastoreCRUD.robot for examples",
-        DeprecationWarning
+        DeprecationWarning,
     )
     print("delete all resources belonging to url " + url)
     session = _cache.switch("CLUSTERING_DELETE")
@@ -98,12 +102,12 @@ def delete(url, userId='admin', password='admin'):
 
 
 def Should_Not_Be_Type_None(var):
-    '''Keyword to check if the given variable is of type NoneType.  If the
+    """Keyword to check if the given variable is of type NoneType.  If the
         variable type does match, raise an assertion so the keyword will fail
-    '''
+    """
     if var is None:
-        raise AssertionError('the variable passed was type NoneType')
-    return 'PASS'
+        raise AssertionError("the variable passed was type NoneType")
+    return "PASS"
 
 
 def execute_ssh_command(ip, username, password, command):
@@ -124,8 +128,13 @@ def execute_ssh_command(ip, username, password, command):
 
 
 def wait_for_controller_up(ip, port="8181"):
-    url = "http://" + ip + ":" + str(port) + \
-          "/restconf/config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules"
+    url = (
+        "http://"
+        + ip
+        + ":"
+        + str(port)
+        + "/restconf/config/opendaylight-inventory:nodes/node/controller-config/yang-ext:mount/config:modules"
+    )
 
     print("Waiting for controller " + ip + " up.")
     # Try 30*10s=5 minutes for the controller to be up.
@@ -135,7 +144,7 @@ def wait_for_controller_up(ip, port="8181"):
             resp = get(url, "admin", "admin")
             print("attempt %s response is %s" % (str(i), str(resp)))
             print(resp.text)
-            if ('clustering-it-provider' in resp.text):
+            if "clustering-it-provider" in resp.text:
                 print("Wait for controller " + ip + " succeeded")
                 return True
         except Exception as e:
@@ -192,9 +201,9 @@ def wait_for_controller_stopped(ip, username, password, karafHome):
     i = 1
     while i <= tries:
         stdout = lib.execute_command("ps -axf | grep karaf | grep -v grep | wc -l")
-        processCnt = stdout[0].strip('\n')
+        processCnt = stdout[0].strip("\n")
         print("processCnt: " + processCnt)
-        if processCnt == '0':
+        if processCnt == "0":
             break
         i = i + 1
         time.sleep(3)
@@ -211,8 +220,12 @@ def clean_journal(ip, username, password, karafHome):
 
 
 def kill_controller(ip, username, password, karafHome):
-    execute_ssh_command(ip, username, password,
-                        "ps axf | grep karaf | grep -v grep | awk '{print \"kill -9 \" $1}' | sh")
+    execute_ssh_command(
+        ip,
+        username,
+        password,
+        "ps axf | grep karaf | grep -v grep | awk '{print \"kill -9 \" $1}' | sh",
+    )
 
 
 def isolate_controller(controllers, username, password, isolated):
@@ -227,20 +240,38 @@ def isolate_controller(controllers, username, password, isolated):
     isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
-            base_str = 'sudo iptables -I OUTPUT -p all --source '
-            cmd_str = base_str + isolated_controller + ' --destination ' + controller + ' -j DROP'
+            base_str = "sudo iptables -I OUTPUT -p all --source "
+            cmd_str = (
+                base_str
+                + isolated_controller
+                + " --destination "
+                + controller
+                + " -j DROP"
+            )
             execute_ssh_command(isolated_controller, username, password, cmd_str)
-            cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP'
+            cmd_str = (
+                base_str
+                + controller
+                + " --destination "
+                + isolated_controller
+                + " -j DROP"
+            )
             execute_ssh_command(isolated_controller, username, password, cmd_str)
-    ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L')
+    ip_tables = execute_ssh_command(
+        isolated_controller, username, password, "sudo iptables -L"
+    )
     print(ip_tables)
-    iso_result = 'pass'
+    iso_result = "pass"
     for controller in controllers:
-        controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
+        controller_regex_string = (
+            "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
+        )
         controller_regex = re.compile(controller_regex_string)
         if not controller_regex.match(ip_tables):
             iso_result = ip_tables
-        controller_regex_string = "[\s\S]*" + controller + " *" + isolated_controller + "[\s\S]*"
+        controller_regex_string = (
+            "[\s\S]*" + controller + " *" + isolated_controller + "[\s\S]*"
+        )
         controller_regex = re.compile(controller_regex_string)
         if not controller_regex.match(ip_tables):
             iso_result = ip_tables
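
For reference, the rule pairs assembled above look like this (example IPs only); rejoin_controller in the next hunk issues the same rules with -D instead of -I to remove them:

isolated, other = "10.0.0.1", "10.0.0.2"  # made-up addresses
base_str = "sudo iptables -I OUTPUT -p all --source "
print(base_str + isolated + " --destination " + other + " -j DROP")
print(base_str + other + " --destination " + isolated + " -j DROP")
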
@@ -259,20 +290,38 @@ def rejoin_controller(controllers, username, password, isolated):
     isolated_controller = controllers[isolated - 1]
     for controller in controllers:
         if controller != isolated_controller:
-            base_str = 'sudo iptables -D OUTPUT -p all --source '
-            cmd_str = base_str + isolated_controller + ' --destination ' + controller + ' -j DROP'
+            base_str = "sudo iptables -D OUTPUT -p all --source "
+            cmd_str = (
+                base_str
+                + isolated_controller
+                + " --destination "
+                + controller
+                + " -j DROP"
+            )
             execute_ssh_command(isolated_controller, username, password, cmd_str)
-            cmd_str = base_str + controller + ' --destination ' + isolated_controller + ' -j DROP'
+            cmd_str = (
+                base_str
+                + controller
+                + " --destination "
+                + isolated_controller
+                + " -j DROP"
+            )
             execute_ssh_command(isolated_controller, username, password, cmd_str)
-    ip_tables = execute_ssh_command(isolated_controller, username, password, 'sudo iptables -L')
+    ip_tables = execute_ssh_command(
+        isolated_controller, username, password, "sudo iptables -L"
+    )
     print(ip_tables)
-    iso_result = 'pass'
+    iso_result = "pass"
     for controller in controllers:
-        controller_regex_string = "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
+        controller_regex_string = (
+            "[\s\S]*" + isolated_controller + " *" + controller + "[\s\S]*"
+        )
         controller_regex = re.compile(controller_regex_string)
         if controller_regex.match(ip_tables):
             iso_result = ip_tables
-        controller_regex_string = "[\s\S]*" + controller + " *" + isolated_controller + "[\s\S]*"
+        controller_regex_string = (
+            "[\s\S]*" + controller + " *" + isolated_controller + "[\s\S]*"
+        )
         controller_regex = re.compile(controller_regex_string)
         if controller_regex.match(ip_tables):
             iso_result = ip_tables
@@ -287,10 +336,10 @@ def flush_iptables(controllers, username, password):
     :param password: Password for all controllers.
     :return: If successful, returns "pass", otherwise returns "fail".
     """
-    flush_result = 'pass'
+    flush_result = "pass"
     for controller in controllers:
-        print('Flushing ', controller)
-        cmd_str = 'sudo iptables -v -F'
+        print("Flushing ", controller)
+        cmd_str = "sudo iptables -v -F"
         cmd_result = execute_ssh_command(controller, username, password, cmd_str)
         print(cmd_result)
         success_string = "Flushing chain `INPUT'" + "\n"
@@ -305,45 +354,47 @@ def flush_iptables(controllers, username, password):
 
 
 def build_elastic_search_JSON_request(query_String):
-    data = {'from': '0',
-            'size': '1',
-            'sort': [{'TimeStamp': {'order': 'desc'}}],
-            'query': {'query_string': {'query': query_String}}}
+    data = {
+        "from": "0",
+        "size": "1",
+        "sort": [{"TimeStamp": {"order": "desc"}}],
+        "query": {"query_string": {"query": query_String}},
+    }
     return json.dumps(data)
 
 
 def create_query_string_search(data_category, metric_name, node_id, rk_node_id):
-    query = 'TSDRDataCategory:'
+    query = "TSDRDataCategory:"
     query += data_category
-    query += ' AND MetricName:'
+    query += " AND MetricName:"
     query += metric_name
-    query += ' AND NodeID:\"'
+    query += ' AND NodeID:"'
     query += node_id
-    query += '\" AND RecordKeys.KeyValue:\"'
+    query += '" AND RecordKeys.KeyValue:"'
     query += rk_node_id
-    query += '\" AND RecordKeys.KeyName:Node AND RecordKeys.KeyValue:0 AND RecordKeys.KeyName:Table'
+    query += '" AND RecordKeys.KeyName:Node AND RecordKeys.KeyValue:0 AND RecordKeys.KeyName:Table'
     return query
 
 
 def create_query_string_count(data_category):
-    query = 'TSDRDataCategory:'
+    query = "TSDRDataCategory:"
     query += data_category
     return query
 
 
 def extract_metric_value_search(response):
-    return str(response['hits']['hits'][0]['_source']['MetricValue'])
+    return str(response["hits"]["hits"][0]["_source"]["MetricValue"])
 
 
 def extract_metric_value_count(response):
-    return int(response['hits']['total'])
+    return int(response["hits"]["total"])
 
 
 #
 # main invoked
 if __name__ != "__main__":
-    _cache = robot.utils.ConnectionCache('No sessions created')
+    _cache = robot.utils.ConnectionCache("No sessions created")
     # here create one session for each HTTP functions
-    _cache.register(requests.session(), alias='CLUSTERING_GET')
-    _cache.register(requests.session(), alias='CLUSTERING_POST')
-    _cache.register(requests.session(), alias='CLUSTERING_DELETE')
+    _cache.register(requests.session(), alias="CLUSTERING_GET")
+    _cache.register(requests.session(), alias="CLUSTERING_POST")
+    _cache.register(requests.session(), alias="CLUSTERING_DELETE")
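
A self-contained sketch of the Elasticsearch request body these helpers produce, duplicating the two small builders shown above so it runs standalone:

import json

def create_query_string_count(data_category):
    return "TSDRDataCategory:" + data_category

def build_elastic_search_JSON_request(query_string):
    data = {
        "from": "0",
        "size": "1",
        "sort": [{"TimeStamp": {"order": "desc"}}],
        "query": {"query_string": {"query": query_string}},
    }
    return json.dumps(data)

print(build_elastic_search_JSON_request(create_query_string_count("FLOWSTATS")))
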
index d5678c45164978c6395e6157485332e33802b686..509eed6d2c62ecb997dec713039d396450b90dc9 100644 (file)
@@ -18,15 +18,15 @@ def _parse_stdout(stdout):
     """ Transforms stdout to dict """
     text = stdout.replace(" ", "")
     text = text.replace("\r", "")
-    pat = re.compile(r'(?P<key>\w+):(?P<value>.+)')
+    pat = re.compile(r"(?P<key>\w+):(?P<value>.+)")
     regroups = re.finditer(pat, text)
     outdict = {}
     for g in regroups:
         print((g.group()))
-        if g.group('key') == '_uuid':
-            cntl_uuid = g.group('value')
+        if g.group("key") == "_uuid":
+            cntl_uuid = g.group("value")
             outdict[cntl_uuid] = {}
-        outdict[cntl_uuid][g.group('key')] = g.group('value')
+        outdict[cntl_uuid][g.group("key")] = g.group("value")
     return outdict
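
A runnable walk-through of the parsing above on a fabricated ovs-vsctl record (values are made up; note the quotes around target survive into the dict):

import re

stdout = '_uuid : abcd-1234\ntarget : "tcp:10.0.0.5:6653"\nis_connected : true\n'
text = stdout.replace(" ", "").replace("\r", "")
outdict = {}
for g in re.finditer(r"(?P<key>\w+):(?P<value>.+)", text):
    if g.group("key") == "_uuid":
        cntl_uuid = g.group("value")
        outdict[cntl_uuid] = {}
    outdict[cntl_uuid][g.group("key")] = g.group("value")
print(outdict)
# {'abcd-1234': {'_uuid': 'abcd-1234', 'target': '"tcp:10.0.0.5:6653"',
#                'is_connected': 'true'}}
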
 
 
@@ -41,36 +41,44 @@ def _postprocess_data(bridges, controllers):
 
     # replacing string value of is_connected key with boolean
     for key, cntl in cntls.items():
-        if cntl['is_connected'] == 'false':
-            cntl['is_connected'] = False
-        elif cntl['is_connected'] == 'true':
-            cntl['is_connected'] = True
+        if cntl["is_connected"] == "false":
+            cntl["is_connected"] = False
+        elif cntl["is_connected"] == "true":
+            cntl["is_connected"] = True
         else:
-            cntl['is_connected'] = None
+            cntl["is_connected"] = None
 
     # replacing keys with the same values
     for key, value in bridges.items():
-        brs[value['name'][1:-1]] = brs[key]
+        brs[value["name"][1:-1]] = brs[key]
         del brs[key]
 
     for key, value in brs.items():
         # replace string with references with dict of controllers
-        ctl_refs = value['controller'][1:-1].split(',')
-        value['controller'] = {}
+        ctl_refs = value["controller"][1:-1].split(",")
+        value["controller"] = {}
         for ctl_ref in ctl_refs:
-            if ctl_ref is not '':
-                value['controller'][ctl_ref] = cntls[ctl_ref]
+            if ctl_ref != "":
+                value["controller"][ctl_ref] = cntls[ctl_ref]
 
     for brkey, bridge in brs.items():
         new_cntls = {}
-        for cnkey, cntl in bridge['controller'].items():
+        for cnkey, cntl in bridge["controller"].items():
             # port 6654 is set by OvsManager.robot to disconnect from controller
-            if '6653' in cntl['target'] or '6633' in cntl['target'] or '6654' in cntl['target']:
-                new_key = cntl['target'].split(":")[1]     # getting middle from "tcp:ip:6653"
+            if (
+                "6653" in cntl["target"]
+                or "6633" in cntl["target"]
+                or "6654" in cntl["target"]
+            ):
+                new_key = cntl["target"].split(":")[
+                    1
+                ]  # getting middle from "tcp:ip:6653"
             else:
-                new_key = cntl['target'][1:-1]  # getting string without quotes "ptcp:6638"
+                new_key = cntl["target"][
+                    1:-1
+                ]  # getting string without quotes "ptcp:6638"
             new_cntls[new_key] = cntl
-        bridge['controller'] = new_cntls
+        bridge["controller"] = new_cntls
 
     return brs
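
In isolation, the controller-key derivation above behaves like this (target strings keep the surrounding quotes that _parse_stdout leaves in place):

for target in ['"tcp:10.0.0.5:6653"', '"ptcp:6638"']:  # fabricated targets
    if "6653" in target or "6633" in target or "6654" in target:
        print(target.split(":")[1])  # middle of "tcp:ip:6653" -> 10.0.0.5
    else:
        print(target[1:-1])  # quotes stripped -> ptcp:6638
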
 
index b8adc3e0a96cbf2c3b9bd418fc43232990369a40..6d05e7e9e21aae8272815de4dc090c0dab5b3cea 100644 (file)
@@ -1,4 +1,4 @@
-'''
+"""
 Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 
 This program and the accompanying materials are made available under the
@@ -8,17 +8,16 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
 Created on May 21, 2014
 
 @author: <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
-'''
+"""
 from xml.dom.minidom import Element
 import ipaddr
 import xml.dom.minidom as md
 import copy
 
-KEY_NOT_FOUND = '<KEY_NOT_FOUND>'  # KeyNotFound for dictDiff
+KEY_NOT_FOUND = "<KEY_NOT_FOUND>"  # KeyNotFound for dictDiff
 
 
-class XMLtoDictParserTools():
-
+class XMLtoDictParserTools:
     @staticmethod
     def parseTreeToDict(node, returnedDict=None, ignoreList=[]):
         """
@@ -32,7 +31,7 @@ class XMLtoDictParserTools():
         @return: dict representation for the input DOM Element
         """
         returnedDict = {} if returnedDict is None else returnedDict
-        if (node.nodeType == Element.ELEMENT_NODE):
+        if node.nodeType == Element.ELEMENT_NODE:
             nodeKey = node.localName
             if nodeKey not in ignoreList:
                 if node.childNodes is not None:
@@ -40,12 +39,16 @@ class XMLtoDictParserTools():
                     for child in node.childNodes:
                         if child.nodeType == Element.TEXT_NODE:
                             nodeValue = child.nodeValue
-                            if (len(nodeValue.strip(' \t\n\r'))) > 0:
-                                XMLtoDictParserTools.addDictValue(returnedDict, nodeKey, nodeValue)
+                            if (len(nodeValue.strip(" \t\n\r"))) > 0:
+                                XMLtoDictParserTools.addDictValue(
+                                    returnedDict, nodeKey, nodeValue
+                                )
                                 nodeKey = None
                                 break
                         elif child.nodeType == Element.ELEMENT_NODE:
-                            childDict = XMLtoDictParserTools.parseTreeToDict(child, childDict, ignoreList)
+                            childDict = XMLtoDictParserTools.parseTreeToDict(
+                                child, childDict, ignoreList
+                            )
 
                     XMLtoDictParserTools.addDictValue(returnedDict, nodeKey, childDict)
 
@@ -53,11 +56,10 @@ class XMLtoDictParserTools():
 
     @staticmethod
     def addDictValue(m_dict, key, value):
-
         def _allign_address(value):
             """unifies output"""
             n = ipaddr.IPNetwork(value)
-            return '{0}/{1}'.format(n.network.exploded, n.prefixlen)
+            return "{0}/{1}".format(n.network.exploded, n.prefixlen)
 
         def _convert_numbers(value):
             if value.startswith("0x"):
@@ -65,29 +67,41 @@ class XMLtoDictParserTools():
             return str(int(value))
 
         if key is not None:
-            if (isinstance(value, str)):
+            if isinstance(value, str):
                 # we need to predict possible differences
                 # for same value in upper or lower case
                 value = value.lower()
             if key not in m_dict:
                 # let's add a mask for ips without a mask
-                if key in ['ipv4-destination', 'ipv4-source', 'ipv6-destination', 'ipv6-source', 'ipv6-nd-target']:
+                if key in [
+                    "ipv4-destination",
+                    "ipv4-source",
+                    "ipv6-destination",
+                    "ipv6-source",
+                    "ipv6-nd-target",
+                ]:
                     nvalue = _allign_address(value)
                     m_dict[key] = nvalue
-                elif key in ['tunnel-mask', 'type', 'metadata-mask', 'out_port', 'out_group']:
+                elif key in [
+                    "tunnel-mask",
+                    "type",
+                    "metadata-mask",
+                    "out_port",
+                    "out_group",
+                ]:
                     nvalue = _convert_numbers(value)
                     m_dict[key] = nvalue
                 else:
                     m_dict[key] = value
             else:
                 exist_value = m_dict.get(key)
-                if (type(exist_value) is dict):
+                if type(exist_value) is dict:
                     list_values = [exist_value, value]
                     key_for_sort = XMLtoDictParserTools.searchKey(exist_value)
                     if key_for_sort is not None:
                         list_values = sorted(list_values, key=lambda k: k[key_for_sort])
                     m_dict[key] = list_values
-                elif (isinstance(exist_value, list)):
+                elif isinstance(exist_value, list):
                     exist_value.append(value)
                     list_values = exist_value
                     key_for_sort = XMLtoDictParserTools.searchKey(value)
@@ -106,7 +120,7 @@ class XMLtoDictParserTools():
         @param dictionary: dictionary with data
         @return: the array order key
         """
-        subKeyStr = ['-id', 'order']
+        subKeyStr = ["-id", "order"]
         for substr in subKeyStr:
             for key in dictionary:
                 if key == substr:
@@ -133,7 +147,7 @@ class XMLtoDictParserTools():
                 # missing key in responded dict
                 diff[key] = (key, KEY_NOT_FOUND)
             # check values of the dictionaries
-            elif (original_dict[key] != responded_dict[key]):
+            elif original_dict[key] != responded_dict[key]:
                 # values are not the same #
 
                 orig_dict_val = original_dict[key]
@@ -141,26 +155,33 @@ class XMLtoDictParserTools():
 
                 # check value is instance of dictionary
                 if isinstance(orig_dict_val, dict) and isinstance(resp_dict_val, dict):
-                    sub_dif = XMLtoDictParserTools.getDifferenceDict(orig_dict_val, resp_dict_val)
+                    sub_dif = XMLtoDictParserTools.getDifferenceDict(
+                        orig_dict_val, resp_dict_val
+                    )
                     if sub_dif:
                         diff[key] = sub_dif
 
                 # check value is instance of list
                 # TODO -> change a basic comparator to compare by id or order
-                elif isinstance(orig_dict_val, list) and isinstance(resp_dict_val, list):
+                elif isinstance(orig_dict_val, list) and isinstance(
+                    resp_dict_val, list
+                ):
                     sub_list_diff = {}
                     # the list lengths
                     orig_i, resp_i = len(orig_dict_val), len(resp_dict_val)
                     # define a max iteration length (less from both)
                     min_index = orig_i if orig_i < resp_i else resp_i
                     for index in range(0, min_index, 1):
-                        if (orig_dict_val[index] != resp_dict_val[index]):
-                            sub_list_diff[index] = (orig_dict_val[index], resp_dict_val[index])
-                    if (orig_i > min_index):
+                        if orig_dict_val[index] != resp_dict_val[index]:
+                            sub_list_diff[index] = (
+                                orig_dict_val[index],
+                                resp_dict_val[index],
+                            )
+                    if orig_i > min_index:
                         # original is longer as responded dict
                         for index in range(min_index, orig_i, 1):
                             sub_list_diff[index] = (orig_dict_val[index], None)
-                    elif (resp_i > min_index):
+                    elif resp_i > min_index:
                         # responded dict is longer as original
                         for index in range(min_index, resp_i, 1):
                             sub_list_diff[index] = (None, resp_dict_val[index])
@@ -177,40 +198,117 @@ class XMLtoDictParserTools():
         return diff
 
 
-IGNORED_TAGS_FOR_OPERATIONAL_COMPARISON = ['id', 'flow-name', 'barrier', 'cookie_mask', 'installHw', 'flags',
-                                           'strict', 'byte-count', 'duration', 'packet-count', 'in-port',
-                                           'vlan-id-present', 'out_group', 'out_port', 'hard-timeout', 'idle-timeout',
-                                           'flow-statistics', 'cookie', 'clear-actions',
-                                           'ipv4-source-address-no-mask', 'ipv4-source-arbitrary-bitmask',
-                                           'ipv4-destination-address-no-mask', 'ipv4-destination-arbitrary-bitmask',
-                                           'ipv6-source-address-no-mask', 'ipv6-source-arbitrary-bitmask',
-                                           'ipv6-destination-address-no-mask', 'ipv6-destination-arbitrary-bitmask']  # noqa
-
-IGNORED_PATHS_FOR_OC = [(['flow', 'instructions', 'instruction', 'apply-actions', 'action', 'controller-action'], True),  # noqa
-                        (['flow', 'instructions', 'instruction', 'clear-actions', 'action'], False),
-                        (['flow', 'instructions', 'instruction', 'apply-actions', 'action', 'push-vlan-action', 'vlan-id'], False),  # noqa
-                        (['flow', 'instructions', 'instruction', 'apply-actions', 'action', 'drop-action'], True),
-                        (['flow', 'instructions', 'instruction', 'apply-actions', 'action', 'flood-action'], True),
-                        ]
-
-TAGS_TO_ADD_FOR_OC = [(['flow', 'instructions', 'instruction', 'apply-actions', 'action', 'output-action'], 'max-length', '0'),  # noqa
-                      ]
-
-
-TAGS_TO_MODIFY_FOR_OC = [(['flow', 'match', 'metadata'], 'metadata', 'metadata-mask'),
-                         (['flow', 'match', 'tunnel'], 'tunnel-id', 'tunnel-mask'),
-                         ]
+IGNORED_TAGS_FOR_OPERATIONAL_COMPARISON = [
+    "id",
+    "flow-name",
+    "barrier",
+    "cookie_mask",
+    "installHw",
+    "flags",
+    "strict",
+    "byte-count",
+    "duration",
+    "packet-count",
+    "in-port",
+    "vlan-id-present",
+    "out_group",
+    "out_port",
+    "hard-timeout",
+    "idle-timeout",
+    "flow-statistics",
+    "cookie",
+    "clear-actions",
+    "ipv4-source-address-no-mask",
+    "ipv4-source-arbitrary-bitmask",
+    "ipv4-destination-address-no-mask",
+    "ipv4-destination-arbitrary-bitmask",
+    "ipv6-source-address-no-mask",
+    "ipv6-source-arbitrary-bitmask",
+    "ipv6-destination-address-no-mask",
+    "ipv6-destination-arbitrary-bitmask",
+]  # noqa
+
+IGNORED_PATHS_FOR_OC = [
+    (
+        [
+            "flow",
+            "instructions",
+            "instruction",
+            "apply-actions",
+            "action",
+            "controller-action",
+        ],
+        True,
+    ),  # noqa
+    (["flow", "instructions", "instruction", "clear-actions", "action"], False),
+    (
+        [
+            "flow",
+            "instructions",
+            "instruction",
+            "apply-actions",
+            "action",
+            "push-vlan-action",
+            "vlan-id",
+        ],
+        False,
+    ),  # noqa
+    (
+        [
+            "flow",
+            "instructions",
+            "instruction",
+            "apply-actions",
+            "action",
+            "drop-action",
+        ],
+        True,
+    ),
+    (
+        [
+            "flow",
+            "instructions",
+            "instruction",
+            "apply-actions",
+            "action",
+            "flood-action",
+        ],
+        True,
+    ),
+]
+
+TAGS_TO_ADD_FOR_OC = [
+    (
+        [
+            "flow",
+            "instructions",
+            "instruction",
+            "apply-actions",
+            "action",
+            "output-action",
+        ],
+        "max-length",
+        "0",
+    )  # noqa
+]
+
+
+TAGS_TO_MODIFY_FOR_OC = [
+    (["flow", "match", "metadata"], "metadata", "metadata-mask"),
+    (["flow", "match", "tunnel"], "tunnel-id", "tunnel-mask"),
+]
 
 
 class XmlComparator:
-
     def is_flow_configured(self, requested_flow, configured_flows):
 
         orig_tree = md.parseString(requested_flow)
         xml_resp_stream = configured_flows
         xml_resp_tree = md.parseString(xml_resp_stream)
-        nodeListOperFlows = xml_resp_tree.getElementsByTagNameNS("*", 'flow')
-        origDict = XMLtoDictParserTools.parseTreeToDict(orig_tree._get_documentElement())
+        nodeListOperFlows = xml_resp_tree.getElementsByTagNameNS("*", "flow")
+        origDict = XMLtoDictParserTools.parseTreeToDict(
+            orig_tree._get_documentElement()
+        )
 
         reportDict = {}
         index = 0
@@ -219,11 +317,15 @@ class XmlComparator:
             XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
             index += 1
             if nodeDict == origDict:
-                return True, ''
-            if nodeDict['flow']['priority'] == origDict['flow']['priority']:
-                return False, 'Flow found with diferences {0}'.format(
-                    XMLtoDictParserTools.getDifferenceDict(nodeDict, origDict))
-        return False, ''
+                return True, ""
+            if nodeDict["flow"]["priority"] == origDict["flow"]["priority"]:
+                return (
+                    False,
+                    "Flow found with differences {0}".format(
+                        XMLtoDictParserTools.getDifferenceDict(nodeDict, origDict)
+                    ),
+                )
+        return False, ""
 
     def is_flow_operational2(self, requested_flow, oper_resp, check_id=False):
         def _rem_unimplemented_tags(tagpath, recurs, tdict):
@@ -239,20 +341,25 @@ class XmlComparator:
             # when to delete
             if len(tagpath) == 1 and tagpath[0] in tdict:
                 del tdict[tagpath[0]]
-            if len(tagpath) > 1 and recurs is True and tagpath[0] in tdict and tdict[tagpath[0]] == {}:
+            if (
+                len(tagpath) > 1
+                and recurs is True
+                and tagpath[0] in tdict
+                and tdict[tagpath[0]] == {}
+            ):
                 del tdict[tagpath[0]]
-            if list(tdict.keys()) == ['order']:
-                del tdict['order']
+            if list(tdict.keys()) == ["order"]:
+                del tdict["order"]
 
         def _add_tags(tagpath, newtag, value, tdict):
-            '''if whole tagpath exists and the tag is not present, it is added with given value'''
+            """if whole tagpath exists and the tag is not present, it is added with given value"""
             if len(tagpath) > 0 and tagpath[0] in tdict:
                 _add_tags(tagpath[1:], newtag, value, tdict[tagpath[0]])
             elif len(tagpath) == 0 and newtag not in tdict:
                 tdict[newtag] = value
 
         def _to_be_modified_tags(tagpath, tag, related_tag, tdict):
-            '''if whole tagpath exists and the tag is not present, it is added with given value'''
+            """if whole tagpath exists and both tag and related_tag are present, the tag's value is adjusted"""
             if len(tagpath) > 0 and tagpath[0] in tdict:
                 _to_be_modified_tags(tagpath[1:], tag, related_tag, tdict[tagpath[0]])
             elif len(tagpath) == 0 and tag in tdict and related_tag in tdict:
@@ -260,27 +367,27 @@ class XmlComparator:
 
         IGNORED_TAGS_LIST = list(IGNORED_TAGS_FOR_OPERATIONAL_COMPARISON)
         if check_id:
-            IGNORED_TAGS_LIST.remove('id')
+            IGNORED_TAGS_LIST.remove("id")
         orig_tree = md.parseString(requested_flow)
         xml_resp_stream = oper_resp
         xml_resp_tree = md.parseString(xml_resp_stream)
-        nodeListOperFlows = xml_resp_tree.getElementsByTagNameNS("*", 'flow')
+        nodeListOperFlows = xml_resp_tree.getElementsByTagNameNS("*", "flow")
         origDict = XMLtoDictParserTools.parseTreeToDict(
-            orig_tree._get_documentElement(),
-            ignoreList=IGNORED_TAGS_LIST)
+            orig_tree._get_documentElement(), ignoreList=IGNORED_TAGS_LIST
+        )
 
         # origDict['flow-statistics'] = origDict.pop( 'flow' )
         reportDict = {}
         index = 0
         for node in nodeListOperFlows:
             nodeDict = XMLtoDictParserTools.parseTreeToDict(
-                node,
-                ignoreList=IGNORED_TAGS_LIST)
+                node, ignoreList=IGNORED_TAGS_LIST
+            )
             XMLtoDictParserTools.addDictValue(reportDict, index, nodeDict)
             index += 1
             if nodeDict == origDict:
-                return True, ''
-            if nodeDict['flow']['priority'] == origDict['flow']['priority']:
+                return True, ""
+            if nodeDict["flow"]["priority"] == origDict["flow"]["priority"]:
                 for p in IGNORED_PATHS_FOR_OC:
                     td = copy.copy(origDict)
                     _rem_unimplemented_tags(p[0], p[1], td)
@@ -290,37 +397,43 @@ class XmlComparator:
                         _to_be_modified_tags(p, t, rt, td)
 
                     if nodeDict == td:
-                        return True, ''
+                        return True, ""
                 if nodeDict == origDict:
-                    return True, ''
-                return False, 'Flow found with diferences {0}'.format(
-                    XMLtoDictParserTools.getDifferenceDict(nodeDict, origDict))
-        return False, ''
+                    return True, ""
+                return (
+                    False,
+                    "Flow found with differences {0}".format(
+                        XMLtoDictParserTools.getDifferenceDict(nodeDict, origDict)
+                    ),
+                )
+        return False, ""
 
     def get_data_for_flow_put_update(self, xml):
         # action only for yet
         xml_dom_input = md.parseString(xml)
-        actionList = xml_dom_input.getElementsByTagName('action')
+        actionList = xml_dom_input.getElementsByTagName("action")
         if actionList is not None and len(actionList) > 0:
             action = actionList[0]
             for child in action.childNodes:
                 if child.nodeType == Element.ELEMENT_NODE:
-                    nodeKey = (child.localName)
-                    if nodeKey != 'order':
-                        if nodeKey != 'drop-action':
-                            new_act = child.ownerDocument.createElement('drop-action')
+                    nodeKey = child.localName
+                    if nodeKey != "order":
+                        if nodeKey != "drop-action":
+                            new_act = child.ownerDocument.createElement("drop-action")
                         else:
-                            new_act = child.ownerDocument.createElement('output-action')
-                            onc = child.ownerDocument.createElement('output-node-connector')
-                            onc_content = child.ownerDocument.createTextNode('TABLE')
+                            new_act = child.ownerDocument.createElement("output-action")
+                            onc = child.ownerDocument.createElement(
+                                "output-node-connector"
+                            )
+                            onc_content = child.ownerDocument.createTextNode("TABLE")
                             onc.appendChild(onc_content)
                             new_act.appendChild(onc)
-                            ml = child.ownerDocument.createElement('max-length')
-                            ml_content = child.ownerDocument.createTextNode('60')
+                            ml = child.ownerDocument.createElement("max-length")
+                            ml_content = child.ownerDocument.createTextNode("60")
                             ml.appendChild(ml_content)
                             new_act.appendChild(ml)
                         child.parentNode.replaceChild(new_act, child)
-        return xml_dom_input.toxml(encoding='utf-8')
+        return xml_dom_input.toxml(encoding="utf-8")
 
     def get_flow_content(self, tid=1, fid=1, priority=1):
         """Returns an xml flow content identified by given details.
@@ -331,7 +444,7 @@ class XmlComparator:
             :param priority: flow priority
         """
 
-        flow_template = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+        flow_template = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
 <flow xmlns="urn:opendaylight:flow:inventory">
     <strict>false</strict>
     <instructions>
@@ -361,7 +474,13 @@ class XmlComparator:
     <flow-name>%s</flow-name>
     <priority>%s</priority>
     <barrier>false</barrier>
-</flow>'''
-
-        flow_data = flow_template % (tid, fid, fid, 'TestFlow-{0}'.format(fid), priority)
+</flow>"""
+
+        flow_data = flow_template % (
+            tid,
+            fid,
+            fid,
+            "TestFlow-{0}".format(fid),
+            priority,
+        )
         return flow_data
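
A simplified, standalone sketch of the XML-to-dict walk that parseTreeToDict performs (no ignore list and no sorting of repeated siblings):

import xml.dom.minidom as md
from xml.dom.minidom import Element

def tree_to_dict(node, out=None):
    out = {} if out is None else out
    if node.nodeType == Element.ELEMENT_NODE:
        child_dict = {}
        for child in node.childNodes:
            if child.nodeType == Element.TEXT_NODE and child.nodeValue.strip():
                out[node.localName] = child.nodeValue.strip().lower()
                return out
            if child.nodeType == Element.ELEMENT_NODE:
                tree_to_dict(child, child_dict)
        out[node.localName] = child_dict
    return out

doc = md.parseString("<flow><priority>2</priority><table_id>0</table_id></flow>")
print(tree_to_dict(doc.documentElement))
# {'flow': {'priority': '2', 'table_id': '0'}}
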
index b60126ac8e532371ab77a3de41b18eb85c7ffc30..93f921e0fdafd100b93ffa265aa5694cdb34515b 100644 (file)
@@ -32,32 +32,32 @@ def from_path_to_jsonpatch(matchedpath):
     :return: the corresponding json patch for removing the fragment
     """
 
-    logging.info('starting. filter path: %s', matchedpath)
+    logging.info("starting. filter path: %s", matchedpath)
 
     # First step: path format change
     # typical input: $['ietf-yang-library:modules-state']['module'][57]
     # desired output: /ietf-yang-library:modules-state/module/57
 
-    matchedpath = matchedpath.replace('$.', '/')
-    matchedpath = matchedpath.replace('$[\'', '/')
-    matchedpath = matchedpath.replace('\'][\'', '/')
-    matchedpath = matchedpath.replace('\']', '/')
+    matchedpath = matchedpath.replace("$.", "/")
+    matchedpath = matchedpath.replace("$['", "/")
+    matchedpath = matchedpath.replace("']['", "/")
+    matchedpath = matchedpath.replace("']", "/")
 
     # this one is for the $[2] pattern
-    if '$[' in matchedpath and ']' in matchedpath:
-        matchedpath = matchedpath.replace('$[', '/')
-        matchedpath = matchedpath.replace(']', '')
+    if "$[" in matchedpath and "]" in matchedpath:
+        matchedpath = matchedpath.replace("$[", "/")
+        matchedpath = matchedpath.replace("]", "")
 
-    matchedpath = matchedpath.replace('[', '')
-    matchedpath = matchedpath.replace(']', '')
-    matchedpath = matchedpath.rstrip('/')
+    matchedpath = matchedpath.replace("[", "")
+    matchedpath = matchedpath.replace("]", "")
+    matchedpath = matchedpath.rstrip("/")
 
     # Now, for input: /ietf-yang-library:modules-state/module/57
     # desired output: [{"op":"remove","path":"/ietf-yang-library:modules-state/module/57"}]
 
-    logging.info('final filter path: %s', matchedpath)
-    as_patch = '[{{"op\":\"remove\",\"path\":\"{0}\"}}]'.format(matchedpath)
-    logging.info('generated patch line: %s', as_patch)
+    logging.info("final filter path: %s", matchedpath)
+    as_patch = '[{{"op":"remove","path":"{0}"}}]'.format(matchedpath)
+    logging.info("generated patch line: %s", as_patch)
     return as_patch
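
Worked end-to-end on the docstring's own example; this is pure string manipulation and runs as-is (the $[n] special case above is skipped because this input does not need it):

matchedpath = "$['ietf-yang-library:modules-state']['module'][57]"
matchedpath = matchedpath.replace("$.", "/").replace("$['", "/")
matchedpath = matchedpath.replace("']['", "/").replace("']", "/")
matchedpath = matchedpath.replace("[", "").replace("]", "").rstrip("/")
print(matchedpath)  # /ietf-yang-library:modules-state/module/57
print('[{{"op":"remove","path":"{0}"}}]'.format(matchedpath))
# [{"op":"remove","path":"/ietf-yang-library:modules-state/module/57"}]
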
 
 
@@ -70,20 +70,23 @@ def apply_filter(json_arg, filtering_line):
     :return: the filtered document
     """
 
-    logging.info('apply_filter:starting. jsonPath filter=[%s]', filtering_line)
+    logging.info("apply_filter:starting. jsonPath filter=[%s]", filtering_line)
 
-    res = jsonpath(json_arg, filtering_line, result_type='PATH')
+    res = jsonpath(json_arg, filtering_line, result_type="PATH")
     if isinstance(res, types.BooleanType) or len(res) == 0:
-        logging.info('apply_filter: The prefilter [%s] matched nothing', filtering_line)
+        logging.info("apply_filter: The prefilter [%s] matched nothing", filtering_line)
         return json_arg
     if len(res) > 1:
-        raise AssertionError('Bad pre-filter [%s] (returned [%d] entries, should return one at most',
-                             filtering_line, len(res))
+        raise AssertionError(
+            "Bad pre-filter [%s] (returned [%d] entries, should return one at most)",
+            filtering_line,
+            len(res),
+        )
     as_json_patch = from_path_to_jsonpatch(res[0])
-    logging.info('apply_filter: applying patch! resolved patch =%s', as_json_patch)
+    logging.info("apply_filter: applying patch! resolved patch =%s", as_json_patch)
     patched_json = jsonpatch.apply_patch(json_arg, as_json_patch)
 
-    logging.info('apply_filter: json after patching: %s', patched_json)
+    logging.info("apply_filter: json after patching: %s", patched_json)
     return patched_json
 
 
@@ -97,15 +100,15 @@ def prefilter(json_arg, initial_prefilter):
     """
 
     if not initial_prefilter:
-        logging.info('prefilter not found!')
+        logging.info("prefilter not found!")
         # whether it is filtered or not, return as json so it can be handled uniformly from now on
         return json.loads(json_arg)
 
     with open(initial_prefilter) as f:
         lines = f.read().splitlines()
-    logging.info('prefilter:lines in prefilter file: %d ', len(lines))
-    lines = filter(lambda k: not k.startswith('#'), lines)
-    logging.info('prefilter:lines after removing comments: %d ', len(lines))
+    logging.info("prefilter:lines in prefilter file: %d ", len(lines))
+    lines = filter(lambda k: not k.startswith("#"), lines)
+    logging.info("prefilter:lines after removing comments: %d ", len(lines))
     json_args_as_json = json.loads(json_arg)
     for filtering_line in lines:
         json_args_as_json = apply_filter(json_args_as_json, filtering_line)
@@ -123,21 +126,29 @@ def prefilter_json_files_then_compare(args):
              requested)
     """
 
-    logging.info('prefilter_json_files_then_compare: starting!')
+    logging.info("prefilter_json_files_then_compare: starting!")
     with open(args.initialFile) as f:
         json_initial = file.read(f)
     with open(args.finalFile) as f2:
         json_final = file.read(f2)
 
     patch = jsonpatch.JsonPatch.from_diff(json_initial, json_final)
-    logging.info('prefilter_json_files_then_compare:differences before patching: %d', len(list(patch)))
+    logging.info(
+        "prefilter_json_files_then_compare:differences before patching: %d",
+        len(list(patch)),
+    )
 
     json_initial_filtered = prefilter(json_initial, args.initial_prefilter)
     json_final_filtered = prefilter(json_final, args.finalPreFilter)
 
-    patch_after_filtering = jsonpatch.JsonPatch.from_diff(json_initial_filtered, json_final_filtered)
+    patch_after_filtering = jsonpatch.JsonPatch.from_diff(
+        json_initial_filtered, json_final_filtered
+    )
     differences_after_patching = list(patch_after_filtering)
-    logging.info('prefilter_json_files_then_compare: differences after patching: %d', len(differences_after_patching))
+    logging.info(
+        "prefilter_json_files_then_compare: differences after patching: %d",
+        len(differences_after_patching),
+    )
 
     if args.printDifferences:
         for patchline in differences_after_patching:
@@ -148,46 +159,89 @@ def prefilter_json_files_then_compare(args):
 
 
 def Json_Diff_Check_Keyword(json_before, json_after, filter_before, filter_after):
-    input_argv = ['-i', json_before, '-f', json_after, '-ipf', filter_before, '-fpf', filter_after, '-pd']
+    input_argv = [
+        "-i",
+        json_before,
+        "-f",
+        json_after,
+        "-ipf",
+        filter_before,
+        "-fpf",
+        filter_after,
+        "-pd",
+    ]
     sys.argv[1:] = input_argv
-    logging.info('starting. constructed command line: %s', sys.argv)
+    logging.info("starting. constructed command line: %s", sys.argv)
     return Json_Diff_Check()
 
 
 def parse_args(args):
-    parser = argparse.ArgumentParser(description='both initial and final json files are compared for differences. '
-                                                 'The program returns 0 when the json contents are the same, or the '
-                                                 'number of'
-                                                 ' differences otherwise. Both json files can be prefiltered for '
-                                                 'certain patterns'
-                                                 ' before checking the differences')
-
-    parser.add_argument('-i', '--initialFile', required='true', dest='initialFile', action='store',
-                        help='initial json file')
-    parser.add_argument('-f', '--finalFile', required='true', dest='finalFile', action='store', help='final json file')
-    parser.add_argument('-ipf', '--initial_prefilter', dest='initial_prefilter',
-                        help='File with pre-filtering patterns to apply to the initial json file before comparing')
-    parser.add_argument('-fpf', '--finalPreFilter', dest='finalPreFilter',
-                        help='File with pre-filtering patterns to apply to the final json file before comparing')
-    parser.add_argument('-pd', '--printDifferences', action='store_true',
-                        help='on differences found, prints the list of paths for the found differences before exitting')
-    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='generate log information')
+    parser = argparse.ArgumentParser(
+        description="Both initial and final json files are compared for differences. "
+        "The program returns 0 when the json contents are the same, or the "
+        "number of differences otherwise. Both json files can be prefiltered "
+        "for certain patterns before checking the differences."
+    )
+
+    parser.add_argument(
+        "-i",
+        "--initialFile",
+        required="true",
+        dest="initialFile",
+        action="store",
+        help="initial json file",
+    )
+    parser.add_argument(
+        "-f",
+        "--finalFile",
+        required="true",
+        dest="finalFile",
+        action="store",
+        help="final json file",
+    )
+    parser.add_argument(
+        "-ipf",
+        "--initial_prefilter",
+        dest="initial_prefilter",
+        help="File with pre-filtering patterns to apply to the initial json file before comparing",
+    )
+    parser.add_argument(
+        "-fpf",
+        "--finalPreFilter",
+        dest="finalPreFilter",
+        help="File with pre-filtering patterns to apply to the final json file before comparing",
+    )
+    parser.add_argument(
+        "-pd",
+        "--printDifferences",
+        action="store_true",
+        help="on differences found, prints the list of paths for the found differences before exitting",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        dest="verbose",
+        action="store_true",
+        help="generate log information",
+    )
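+    # Example invocation (file names illustrative):
+    #   JsonDiffTool.py -i before.json -f after.json -ipf pre.conf -pd -v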
     return parser.parse_args(args)
 
 
 def Json_Diff_Check():
     args = parse_args(sys.argv[1:])
 
-    if hasattr(args, 'verbose'):
+    if hasattr(args, "verbose"):
         if args.verbose:
             logging.basicConfig(level=logging.DEBUG)
 
     if args.printDifferences:
-        logging.info('(will print differences)')
+        logging.info("(will print differences)")
 
     result = prefilter_json_files_then_compare(args)
     return result
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     Json_Diff_Check()
index 85c8b1ecc22ae141395adf08319477dbdcc03f01..d8f5c5879c161a2aac485cb01808da0a2a4af79a 100644 (file)
@@ -19,14 +19,22 @@ class PathConversionTest(unittest.TestCase):
     """
 
     def testArrayElementConversion(self):
-        self.assertEquals('[{"op":"remove","path":"/ietf-yang-library:modules-state/module/56"}]',
-                          JsonDiffTool.from_path_to_jsonpatch('/ietf-yang-library:modules-state/module/56'),
-                          "Array element conversion failed!")
+        self.assertEquals(
+            '[{"op":"remove","path":"/ietf-yang-library:modules-state/module/56"}]',
+            JsonDiffTool.from_path_to_jsonpatch(
+                "/ietf-yang-library:modules-state/module/56"
+            ),
+            "Array element conversion failed!",
+        )
 
     def testMapValueElementConversion(self):
-        self.assertEquals('[{"op":"remove","path":"/ietf-yang-library:modules-state/module/blablah"}]',
-                          JsonDiffTool.from_path_to_jsonpatch('/ietf-yang-library:modules-state/module/blablah'),
-                          "Array element conversion failed!")
+        self.assertEquals(
+            '[{"op":"remove","path":"/ietf-yang-library:modules-state/module/blablah"}]',
+            JsonDiffTool.from_path_to_jsonpatch(
+                "/ietf-yang-library:modules-state/module/blablah"
+            ),
+            "Array element conversion failed!",
+        )
 
 
 class JsonDiffToolTest(unittest.TestCase):
@@ -38,109 +46,161 @@ class JsonDiffToolTest(unittest.TestCase):
         """
         Identical documents
         """
-        self.assertEquals(0,
-                          JsonDiffTool.Json_Diff_Check_Keyword('testinput/arrayTwoNames.json',
-                                                               'testinput/arrayTwoNamesCopy.json',
-                                                               '',
-                                                               ''),
-                          'failed! (expected 0 differences)')
+        self.assertEquals(
+            0,
+            JsonDiffTool.Json_Diff_Check_Keyword(
+                "testinput/arrayTwoNames.json",
+                "testinput/arrayTwoNamesCopy.json",
+                "",
+                "",
+            ),
+            "failed! (expected 0 differences)",
+        )
 
     def testEqualFilesWithScrambledArrayOrder(self):
         """
         This is moving an array element from one position to another. RFC 6902 describes this as "moving
         a value", but this jsonpatch implementation constructs a patch using remove + add, which is acceptable.
         """
-        self.assertEquals(2,
-                          JsonDiffTool.Json_Diff_Check_Keyword('testinput/arrayTwoNames.json',
-                                                               'testinput/arrayTwoNamesReversed.json',
-                                                               '',
-                                                               ''),
-                          'failed! (expected 2 differences)')
+        self.assertEquals(
+            2,
+            JsonDiffTool.Json_Diff_Check_Keyword(
+                "testinput/arrayTwoNames.json",
+                "testinput/arrayTwoNamesReversed.json",
+                "",
+                "",
+            ),
+            "failed! (expected 2 differences)",
+        )
 
     def testEqualFilesWithChangedAttributeOrder(self):
         """
         Attributes appear in a different order; that is not a difference.
         """
-        self.assertEquals(0,
-                          JsonDiffTool.Json_Diff_Check_Keyword('testinput/setTwoNames.json',
-                                                               'testinput/setTwoNamesReversed.json',
-                                                               '',
-                                                               ''),
-                          'failed! (expected 0 differences)')
+        self.assertEquals(
+            0,
+            JsonDiffTool.Json_Diff_Check_Keyword(
+                "testinput/setTwoNames.json",
+                "testinput/setTwoNamesReversed.json",
+                "",
+                "",
+            ),
+            "failed! (expected 0 differences)",
+        )
 
     def testSimpleDifferenceSecondFileWithExtraAttrib(self):
-        self.assertEquals(1,
-                          JsonDiffTool.Json_Diff_Check_Keyword('testinput/setTwoNames.json',
-                                                               'testinput/setTwoNamesExtraAttrib.json',
-                                                               '',
-                                                               ''),
-                          'failed! (expected 1 differences)')
+        self.assertEquals(
+            1,
+            JsonDiffTool.Json_Diff_Check_Keyword(
+                "testinput/setTwoNames.json",
+                "testinput/setTwoNamesExtraAttrib.json",
+                "",
+                "",
+            ),
+            "failed! (expected 1 differences)",
+        )
 
     def testSimpleDifferenceCountingWithoutFiltering(self):
         """
         Example coming from a real daexim export. No prefilters used.
         """
-        input_argv = ['-i', 'testinput/mainTestCase/odl_backup_operational_before.json',
-                      '-f', 'testinput/mainTestCase/odl_backup_operational_after.json']
+        input_argv = [
+            "-i",
+            "testinput/mainTestCase/odl_backup_operational_before.json",
+            "-f",
+            "testinput/mainTestCase/odl_backup_operational_after.json",
+        ]
         sys.argv[1:] = input_argv
-        self.assertEquals(16,
-                          JsonDiffTool.Json_Diff_Check(),
-                          "main failed! expected 16 differences, result was: " + str(JsonDiffTool.Json_Diff_Check()))
+        self.assertEquals(
+            16,
+            JsonDiffTool.Json_Diff_Check(),
+            "main failed! expected 16 differences, result was: "
+            + str(JsonDiffTool.Json_Diff_Check()),
+        )
 
     def testSimpleDifferenceCountingUsingSingleMatchingBeforeFilter(self):
         """
         Using a prefilter for the initial file. The prefilter contains one expression only.
         """
-        input_argv = ['-i', 'testinput/mainTestCase/odl_backup_operational_before.json',
-                      '-f', 'testinput/mainTestCase/odl_backup_operational_after.json',
-                      '-ipf', 'testinput/mainTestCase/json_prefilter.conf', '-v']
+        input_argv = [
+            "-i",
+            "testinput/mainTestCase/odl_backup_operational_before.json",
+            "-f",
+            "testinput/mainTestCase/odl_backup_operational_after.json",
+            "-ipf",
+            "testinput/mainTestCase/json_prefilter.conf",
+            "-v",
+        ]
         sys.argv[1:] = input_argv
-        self.assertEquals(15,
-                          JsonDiffTool.Json_Diff_Check(),
-                          "main failed! expected 15 differences, result was: " + str(JsonDiffTool.Json_Diff_Check()))
+        self.assertEquals(
+            15,
+            JsonDiffTool.Json_Diff_Check(),
+            "main failed! expected 15 differences, result was: "
+            + str(JsonDiffTool.Json_Diff_Check()),
+        )
 
     def testSimpleDifferenceCountingUsingMatchingBeforeFilterMatchingTwoEntries(self):
         """
         Using a prefilter for the initial file. The prefilter contains two expressions.
         """
-        input_argv = ['-i', 'testinput/mainTestCase/odl_backup_operational_before.json',
-                      '-f', 'testinput/mainTestCase/odl_backup_operational_after.json',
-                      '-ipf', 'testinput/mainTestCase/json_prefilter_two_matches.conf', '-v']
+        input_argv = [
+            "-i",
+            "testinput/mainTestCase/odl_backup_operational_before.json",
+            "-f",
+            "testinput/mainTestCase/odl_backup_operational_after.json",
+            "-ipf",
+            "testinput/mainTestCase/json_prefilter_two_matches.conf",
+            "-v",
+        ]
         sys.argv[1:] = input_argv
-        self.assertEquals(14,
-                          JsonDiffTool.Json_Diff_Check(),
-                          "main failed! expected 14 differences, result was: " + str(JsonDiffTool.Json_Diff_Check()))
+        self.assertEquals(
+            14,
+            JsonDiffTool.Json_Diff_Check(),
+            "main failed! expected 14 differences, result was: "
+            + str(JsonDiffTool.Json_Diff_Check()),
+        )
 
     def testSimpleDifferenceCountingUsingBothBeforeAndAfterFilters(self):
         """
         Using a prefilter for both initial and final files
         """
-        input_argv = ['-i', 'testinput/mainTestCase/odl_backup_operational_before.json',
-                      '-f', 'testinput/mainTestCase/odl_backup_operational_after.json',
-                      '-ipf', 'testinput/mainTestCase/json_prefilter.conf',
-                      '-fpf', 'testinput/mainTestCase/json_postfilter.conf',
-                      '-v']
+        input_argv = [
+            "-i",
+            "testinput/mainTestCase/odl_backup_operational_before.json",
+            "-f",
+            "testinput/mainTestCase/odl_backup_operational_after.json",
+            "-ipf",
+            "testinput/mainTestCase/json_prefilter.conf",
+            "-fpf",
+            "testinput/mainTestCase/json_postfilter.conf",
+            "-v",
+        ]
         sys.argv[1:] = input_argv
-        self.assertEquals(16,
-                          JsonDiffTool.Json_Diff_Check(),
-                          "main failed! expected 16 differences, result was: " + str(JsonDiffTool.Json_Diff_Check()))
+        self.assertEquals(
+            16,
+            JsonDiffTool.Json_Diff_Check(),
+            "main failed! expected 16 differences, result was: "
+            + str(JsonDiffTool.Json_Diff_Check()),
+        )
 
     def testUsingANonExistingFile(self):
         """
         The second file does not exist. Exception expected
         """
-        self.assertRaises(IOError,
-                          JsonDiffTool.Json_Diff_Check_Keyword,
-                          'testinput/arrayTwoNames.json',
-                          'testinput/thisFileDoesNotExist.json',
-                          '',
-                          '')
+        self.assertRaises(
+            IOError,
+            JsonDiffTool.Json_Diff_Check_Keyword,
+            "testinput/arrayTwoNames.json",
+            "testinput/thisFileDoesNotExist.json",
+            "",
+            "",
+        )
 
     def testNotPassingAMandatoryParameter(self):
         """
         Both initial and final json files are mandatory
         """
-        input_argv = ['-f', 'testinput/mainTestCase/odl_backup_operational_after.json']
+        input_argv = ["-f", "testinput/mainTestCase/odl_backup_operational_after.json"]
         # parser = JsonDiffTool.parseArgs(input_argv)
 
         with self.assertRaises(SystemExit) as cm:
@@ -153,16 +213,24 @@ class JsonDiffToolTest(unittest.TestCase):
         """
         Using prefilter files whose expressions match nothing
         """
-        input_argv = ['-i', 'testinput/mainTestCase/odl_backup_operational_before.json',
-                      '-f', 'testinput/mainTestCase/odl_backup_operational_after.json',
-                      '-ipf', 'testinput/mainTestCase/json_prefilter_zero_matches.conf',
-                      '-fpf', 'testinput/mainTestCase/json_prefilter_zero_matches.conf',
-                      '-v']
+        input_argv = [
+            "-i",
+            "testinput/mainTestCase/odl_backup_operational_before.json",
+            "-f",
+            "testinput/mainTestCase/odl_backup_operational_after.json",
+            "-ipf",
+            "testinput/mainTestCase/json_prefilter_zero_matches.conf",
+            "-fpf",
+            "testinput/mainTestCase/json_prefilter_zero_matches.conf",
+            "-v",
+        ]
         sys.argv[1:] = input_argv
-        self.assertEquals(16,
-                          JsonDiffTool.Json_Diff_Check(),
-                          "main failed! expected 16 differences, result was: " + str(
-                              JsonDiffTool.Json_Diff_Check()))
+        self.assertEquals(
+            16,
+            JsonDiffTool.Json_Diff_Check(),
+            "main failed! expected 16 differences, result was: "
+            + str(JsonDiffTool.Json_Diff_Check()),
+        )
 
 
 if __name__ == "__main__":
index 76ebf8a9439322e55aaf2840898fdbcc717abd98..3c09fd9e3357511d152d82645e2d3ee3d5f2f545 100644 (file)
@@ -15,7 +15,7 @@ import sys
 
 __author__ = "Phil Budne"
 __revision__ = "$Revision: 1.13 $"
-__version__ = '0.54'
+__version__ = "0.54"
 
 #   Copyright (c) 2007 Stefan Goessner (goessner.net)
 #       Copyright (c) 2008 Kate Rhodes (masukomi.org)
@@ -53,13 +53,14 @@ __version__ = '0.54'
 # internally keep paths as lists to preserve integer types
 #       (instead of as ';' delimited strings)
 
-__all__ = ['jsonpath']
+__all__ = ["jsonpath"]
 
 
 # XXX precompile RE objects on load???
 # re_1 = re.compile(.....)
 # re_2 = re.compile(.....)
 
+
 def normalize(x):
     """normalize the path expression; outside jsonpath to allow testing"""
     subx = []
@@ -92,12 +93,12 @@ def normalize(x):
     return x
 
 
-def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
+def jsonpath(obj, expr, result_type="VALUE", debug=0, use_eval=True):
     """traverse JSON object using jsonpath expr, returning values or paths"""
 
     def s(x, y):
         """concatenate path elements"""
-        return str(x) + ';' + str(y)
+        return str(x) + ";" + str(y)
 
     def isint(x):
         """check if argument represents a decimal integer"""
@@ -106,8 +107,8 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
     def as_path(path):
         """convert internal path representation to
            "full bracket notation" for PATH output"""
-        p = '$'
-        for piece in path.split(';')[1:]:
+        p = "$"
+        for piece in path.split(";")[1:]:
             # make a guess on how to index
             # XXX need to apply \ quoting on '!!
             if isint(piece):
@@ -117,11 +118,11 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
         return p
 
     def store(path, object):
-        if result_type == 'VALUE':
+        if result_type == "VALUE":
             result.append(object)
-        elif result_type == 'IPATH':  # Index format path (Python ext)
+        elif result_type == "IPATH":  # Index format path (Python ext)
             # return list of list of indices -- can be used w/o "eval" or split
-            result.append(path.split(';')[1:])
+            result.append(path.split(";")[1:])
         else:  # PATH
             result.append(as_path(path))
         return path
@@ -130,12 +131,13 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
         if debug:
             print("trace", expr, "/", path)
         if expr:
-            x = expr.split(';')
+            x = expr.split(";")
             loc = x[0]
-            x = ';'.join(x[1:])
+            x = ";".join(x[1:])
             if debug:
                 print("\t", loc, type(obj))
             if loc == "*":
+
                 def f03(key, loc, expr, obj, path):
                     if debug > 1:
                         print("\tf03", key, loc, expr, path)
@@ -150,10 +152,10 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                         print("\tf04", key, loc, expr, path)
                     if isinstance(obj, dict):
                         if key in obj:
-                            trace(s('..', expr), obj[key], s(path, key))
+                            trace(s("..", expr), obj[key], s(path, key))
                     else:
                         if key < len(obj):
-                            trace(s('..', expr), obj[key], s(path, key))
+                            trace(s("..", expr), obj[key], s(path, key))
 
                 walk(loc, x, obj, path, f04)
             elif loc == "!":
@@ -197,9 +199,10 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                     walk(loc, x, obj, path, f05)
                     return
 
-                m = re.match(r'(-?[0-9]*):(-?[0-9]*):?(-?[0-9]*)$', loc)
+                m = re.match(r"(-?[0-9]*):(-?[0-9]*):?(-?[0-9]*)$", loc)
                 if m:
                     if isinstance(obj, (dict, list)):
+
                         def max(x, y):
                             if x > y:
                                 return x
@@ -283,18 +286,18 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
                 return ret
 
             g1 = m.group(1)
-            elts = g1.split('.')
+            elts = g1.split(".")
             if elts[-1] == "length":
                 return "len(%s)" % brackets(elts[1:-1])
             return brackets(elts[1:])
 
-        loc = re.sub(r'(?<!\\)(@\.[a-zA-Z@_.]+)', varmatch, loc)
+        loc = re.sub(r"(?<!\\)(@\.[a-zA-Z@_.]+)", varmatch, loc)
 
         # removed = -> == translation
         # causes problems if a string contains =
 
         # replace @  w/ "__obj", but \@ means a literal @
-        loc = re.sub(r'(?<!\\)@', "__obj", loc).replace(r'\@', '@')
+        loc = re.sub(r"(?<!\\)@", "__obj", loc).replace(r"\@", "@")
         if not use_eval:
             if debug:
                 print("eval disabled")
@@ -303,7 +306,7 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
             print("eval", loc)
         try:
             # eval w/ caller globals, w/ local "__obj"!
-            v = eval(loc, caller_globals, {'__obj': obj})
+            v = eval(loc, caller_globals, {"__obj": obj})
         except Exception as e:
             if debug:
                 print(e)
@@ -324,14 +327,14 @@ def jsonpath(obj, expr, result_type='VALUE', debug=0, use_eval=True):
             cleaned_expr = cleaned_expr[2:]
 
         # XXX wrap this in a try??
-        trace(cleaned_expr, obj, '$')
+        trace(cleaned_expr, obj, "$")
 
         if len(result) > 0:
             return result
     return False
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     try:
         import json  # v2.6
     except ImportError:
@@ -347,7 +350,7 @@ if __name__ == '__main__':
 
     object = json.load(file(sys.argv[1]))
     path = sys.argv[2]
-    format = 'VALUE'
+    format = "VALUE"
 
     if len(sys.argv) > 3:
         # XXX verify?
index b3a303eef250c3f515766368f5b147d24e5e1e24..3e1c4d83303b069d224aabc55034faef8aadbc8b 100644 (file)
@@ -7,171 +7,222 @@ import re
 # Make sure to have unique matches in different lines
 # Order the list in alphabetical order based on the "issue" key
 _whitelist = [
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-972",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Node was created by other transaction",
-         "Optimistic lock failed for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node" +
-         "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow",
-         "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=21}]/flow/flow" +
-         "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=L3."
-     ]},
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-972",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Node was created by other transaction",
+            "Optimistic lock failed for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node"
+            + "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow",
+            "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=21}]/flow/flow"
+            + "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=L3.",
+        ],
+    },
     # oxygen
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-972",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Node was created by other transaction",
-         "OptimisticLockFailedException: Optimistic lock failed."
-         "Conflicting modification for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node" +
-         "[{(urn:opendaylight:inventory?revision=2013-08-19)id=",
-         "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=21}]/flow/flow" +
-         "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=L3.", ".21.", ".42."
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1135",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Node was created by other transaction",
-         "Optimistic lock failed for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node" +
-         "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow:",
-     ]},
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-972",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Node was created by other transaction",
+            "OptimisticLockFailedException: Optimistic lock failed."
+            "Conflicting modification for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node"
+            + "[{(urn:opendaylight:inventory?revision=2013-08-19)id=",
+            "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=21}]/flow/flow"
+            + "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=L3.",
+            ".21.",
+            ".42.",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1135",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Node was created by other transaction",
+            "Optimistic lock failed for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node"
+            + "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow:",
+        ],
+    },
     # oxygen
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1135",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "OptimisticLockFailedException: Optimistic lock failed."
-         "Conflicting modification for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node" +
-         "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow:",
-         "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=47}]/flow/flow" +
-         "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=SNAT.", ".47."
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1136",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Node was deleted by other transaction",
-         "Optimistic lock failed for path /(urn:opendaylight:netvirt:elan?revision=2015-06-02)elan-" +
-         "forwarding-tables/mac-table/mac-table[{(urn:opendaylight:netvirt:elan?revision=2015-06-02)" +
-         "elan-instance-name=",
-     ]},
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1135",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "OptimisticLockFailedException: Optimistic lock failed."
+            "Conflicting modification for path /(urn:opendaylight:inventory?revision=2013-08-19)nodes/node/node"
+            + "[{(urn:opendaylight:inventory?revision=2013-08-19)id=openflow:",
+            "table/table[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=47}]/flow/flow"
+            + "[{(urn:opendaylight:flow:inventory?revision=2013-08-19)id=SNAT.",
+            ".47.",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1136",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Node was deleted by other transaction",
+            "Optimistic lock failed for path /(urn:opendaylight:netvirt:elan?revision=2015-06-02)elan-"
+            + "forwarding-tables/mac-table/mac-table[{(urn:opendaylight:netvirt:elan?revision=2015-06-02)"
+            + "elan-instance-name=",
+        ],
+    },
     # oxygen version of NETVIRT-1136
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1136",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Node was deleted by other transaction",
-         "OptimisticLockFailedException: Optimistic lock failed.",
-         "Conflicting modification for path /(urn:opendaylight:netvirt:elan?revision=2015-06-02)elan-" +
-         "forwarding-tables/mac-table/mac-table[{(urn:opendaylight:netvirt:elan?revision=2015-06-02)" +
-         "elan-instance-name="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1260",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Optimistic lock failed for path /(urn:ietf:params:xml:ns:yang:ietf-interfaces?revision=2014-05-08)" +
-         "interfaces/interface/interface[{(urn:ietf:params:xml:ns:yang:ietf-interfaces?revision=2014-05-08)name=",
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "OptimisticLockFailedException",
-         "/(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)" +
-         "vpn-instance-op-data/vpn-instance-op-data-entry/vpn-instance-op-data-entry" +
-         "[{(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)vrf-id=",
-         "vrf-id=", "/vpn-to-dpn-list/vpn-to-dpn-list", "dpnId="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
-     "id": "ExecutionException",
-     "context": [
-         "OptimisticLockFailedException: Optimistic lock failed",
-         "removeOrUpdateVpnToDpnList: Error removing from dpnToVpnList for vpn "
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
-     "id": "OptimisticLockFailedException",
-     "context": [
-         "OptimisticLockFailedException",
-         "VpnInterfaceOpListener",
-         "Direct Exception (not failed Future) when executing job, won't even retry: JobEntry{key='VPNINTERFACE-",
-         "vpn-instance-op-data/vpn-instance-op-data-entry/vpn-instance-op-data-entry" +
-         "[{(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)vrf-id=",
-         "vrf-id=", "/vpn-to-dpn-list/vpn-to-dpn-list", "dpnId="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1281",
-     "id": "OptimisticLockFailedException",
-     "context": [
-         "OptimisticLockFailedException: Optimistic lock failed.",
-         "ConflictingModificationAppliedException: Node children was modified by other transaction",
-         "Direct Exception (not failed Future) when executing job, won't even retry: JobEntry{key='VPNINTERFACE-"
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1304",
-     "id": "ModifiedNodeDoesNotExistException",
-     "context": [
-         "ModifiedNodeDoesNotExistException",
-         "/(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)fibEntries/" +
-         "vrfTables/vrfTables[{(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)routeDistinguisher="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1304",
-     "id": "TransactionCommitFailedException",
-     "context": [
-         "TransactionCommitFailedException",
-         "/(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)fibEntries/" +
-         "vrfTables/vrfTables[{(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)routeDistinguisher="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1427",
-     "id": "ModifiedNodeDoesNotExistException",
-     "context": [
-         "/(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)vpn-interfaces/vpn-interface/vpn-interface" +
-         "[{(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)name=",
-         "AugmentationIdentifier{childNames=[(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)adjacency]}"
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1428",
-     "id": "ModifiedNodeDoesNotExistException",
-     "context": [
-         "/(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)vpn-interfaces/vpn-interface/vpn-interface" +
-         "[{(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)name=",
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Optimistic lock failed for path /(urn:opendaylight:neutron?revision=2015-07-12)" +
-         "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid=",
-         "Conflicting modification for path /(urn:opendaylight:neutron?revision=2015-07-12)" +
-         "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid="
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
-     "id": "OptimisticLockFailedException",
-     "context": [
-         "Got OptimisticLockFailedException",
-         "AbstractTranscriberInterface"
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "Optimistic lock failed for path /(urn:opendaylight:neutron?revision=2015-07-12)neutron"
-     ]},
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1136",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Node was deleted by other transaction",
+            "OptimisticLockFailedException: Optimistic lock failed.",
+            "Conflicting modification for path /(urn:opendaylight:netvirt:elan?revision=2015-06-02)elan-"
+            + "forwarding-tables/mac-table/mac-table[{(urn:opendaylight:netvirt:elan?revision=2015-06-02)"
+            + "elan-instance-name=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1260",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Optimistic lock failed for path /(urn:ietf:params:xml:ns:yang:ietf-interfaces?revision=2014-05-08)"
+            + "interfaces/interface/interface[{(urn:ietf:params:xml:ns:yang:ietf-interfaces?revision=2014-05-08)name="
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "OptimisticLockFailedException",
+            "/(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)"
+            + "vpn-instance-op-data/vpn-instance-op-data-entry/vpn-instance-op-data-entry"
+            + "[{(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)vrf-id=",
+            "vrf-id=",
+            "/vpn-to-dpn-list/vpn-to-dpn-list",
+            "dpnId=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
+        "id": "ExecutionException",
+        "context": [
+            "OptimisticLockFailedException: Optimistic lock failed",
+            "removeOrUpdateVpnToDpnList: Error removing from dpnToVpnList for vpn ",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1270",
+        "id": "OptimisticLockFailedException",
+        "context": [
+            "OptimisticLockFailedException",
+            "VpnInterfaceOpListener",
+            "Direct Exception (not failed Future) when executing job, won't even retry: JobEntry{key='VPNINTERFACE-",
+            "vpn-instance-op-data/vpn-instance-op-data-entry/vpn-instance-op-data-entry"
+            + "[{(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)vrf-id=",
+            "vrf-id=",
+            "/vpn-to-dpn-list/vpn-to-dpn-list",
+            "dpnId=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1281",
+        "id": "OptimisticLockFailedException",
+        "context": [
+            "OptimisticLockFailedException: Optimistic lock failed.",
+            "ConflictingModificationAppliedException: Node children was modified by other transaction",
+            "Direct Exception (not failed Future) when executing job, won't even retry: JobEntry{key='VPNINTERFACE-",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1304",
+        "id": "ModifiedNodeDoesNotExistException",
+        "context": [
+            "ModifiedNodeDoesNotExistException",
+            "/(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)fibEntries/"
+            + "vrfTables/vrfTables[{(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)routeDistinguisher=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1304",
+        "id": "TransactionCommitFailedException",
+        "context": [
+            "TransactionCommitFailedException",
+            "/(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)fibEntries/"
+            + "vrfTables/vrfTables[{(urn:opendaylight:netvirt:fibmanager?revision=2015-03-30)routeDistinguisher=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1427",
+        "id": "ModifiedNodeDoesNotExistException",
+        "context": [
+            "/(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)vpn-interfaces/vpn-interface/vpn-interface"
+            + "[{(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)name=",
+            "AugmentationIdentifier{childNames=[(urn:opendaylight:netvirt:l3vpn?revision=2013-09-11)adjacency]}",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1428",
+        "id": "ModifiedNodeDoesNotExistException",
+        "context": [
+            "/(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)vpn-interfaces/vpn-interface/vpn-interface"
+            + "[{(urn:huawei:params:xml:ns:yang:l3vpn?revision=2014-08-15)name="
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Optimistic lock failed for path /(urn:opendaylight:neutron?revision=2015-07-12)"
+            + "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid=",
+            "Conflicting modification for path /(urn:opendaylight:neutron?revision=2015-07-12)"
+            + "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
+        "id": "OptimisticLockFailedException",
+        "context": [
+            "Got OptimisticLockFailedException",
+            "AbstractTranscriberInterface",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "Optimistic lock failed for path /(urn:opendaylight:neutron?revision=2015-07-12)neutron"
+        ],
+    },
     # oxygen
-    {"issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
-     "id": "ConflictingModificationAppliedException",
-     "context": [
-         "OptimisticLockFailedException: Optimistic lock failed.",
-         "Conflicting modification for path /(urn:opendaylight:neutron?revision=2015-07-12)" +
-         "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid=",
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/OPNFLWPLUG-917",
-     "id": "IllegalStateException",
-     "context": [
-         "java.lang.IllegalStateException: Deserializer for key: msgVersion: 4 objectClass: " +
-         "org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entries.grouping.MatchEntry " +
-         "msgType: 1 oxm_field: 33 experimenterID: null was not found " +
-         "- please verify that all needed deserializers ale loaded correctly"
-     ]},
-    {"issue": "https://jira.opendaylight.org/browse/NETVIRT-1640",
-     "id": "ElasticsearchAppender",
-     "context": [
-         "Can't append into Elasticsearch",
-         "org.apache.karaf.decanter.appender.elasticsearch - 1.0.0"
-     ]}
+    {
+        "issue": "https://jira.opendaylight.org/browse/NEUTRON-157",
+        "id": "ConflictingModificationAppliedException",
+        "context": [
+            "OptimisticLockFailedException: Optimistic lock failed.",
+            "Conflicting modification for path /(urn:opendaylight:neutron?revision=2015-07-12)"
+            + "neutron/networks/network/network[{(urn:opendaylight:neutron?revision=2015-07-12)uuid=",
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/OPNFLWPLUG-917",
+        "id": "IllegalStateException",
+        "context": [
+            "java.lang.IllegalStateException: Deserializer for key: msgVersion: 4 objectClass: "
+            + "org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.oxm.rev150225.match.entries.grouping.MatchEntry "
+            + "msgType: 1 oxm_field: 33 experimenterID: null was not found "
+            + "- please verify that all needed deserializers ale loaded correctly"
+        ],
+    },
+    {
+        "issue": "https://jira.opendaylight.org/browse/NETVIRT-1640",
+        "id": "ElasticsearchAppender",
+        "context": [
+            "Can't append into Elasticsearch",
+            "org.apache.karaf.decanter.appender.elasticsearch - 1.0.0",
+        ],
+    },
 ]
 
 _re_ts = re.compile(r"^[0-9]{4}(-[0-9]{2}){2}T([0-9]{2}:){2}[0-9]{2},[0-9]{3}")
-_re_ts_we = re.compile(r"^[0-9]{4}(-[0-9]{2}){2}T([0-9]{2}:){2}[0-9]{2},[0-9]{3}( \| ERROR \| | \| WARN  \| )")
+_re_ts_we = re.compile(
+    r"^[0-9]{4}(-[0-9]{2}){2}T([0-9]{2}:){2}[0-9]{2},[0-9]{3}( \| ERROR \| | \| WARN  \| )"
+)
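+# e.g. (illustrative) both regexes match a karaf.log prefix such as
+# "2020-03-27T19:11:25,123"; _re_ts_we additionally requires a following
+# " | ERROR | " or " | WARN  | " marker.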
 _re_ex = re.compile(r"(?i)exception")
 _ex_map = collections.OrderedDict()
 _ts_list = []
@@ -219,7 +270,7 @@ def get_exceptions(lines):
         if ex:
             index = len(_ts_list) - 1
             if index not in _ex_map:
-                _ex_map[index] = {"warnerr_list": list(warnerr_deq), 'lines': cur_list}
+                _ex_map[index] = {"warnerr_list": list(warnerr_deq), "lines": cur_list}
                 warnerr_deq.clear()  # reset the deque to only track new ERROR and WARN lines
 
     return _ex_map
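+# Shape sketch of the returned map (illustrative): each key is the index into
+# _ts_list of the line starting an exception block, and each value looks like
+#   {"warnerr_list": [<recent WARN/ERROR lines>], "lines": [<exception block>]}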
index 62b9ef0d33ceb7cf228f1e69ab2c54ba74452b37..dfa20491575e1dc13f5d72e233b0dc896b3eb1ea 100644 (file)
@@ -7,6 +7,7 @@
 
 import collections as _collections
 import jmespath
+
 try:
     import simplejson as _json
 except ImportError:  # Python2.7 calls it json.
@@ -62,7 +63,9 @@ class _Hsfod(_collections.OrderedDict):
         sup = super(_Hsfod, self)  # possibly something else than OrderedDict
         sup.__init__(items_sorted)
         # Repr string is used for sorting, keys are more important than values.
-        self.__repr = '{' + repr(list(self.keys())) + ':' + repr(list(self.values())) + '}'
+        self.__repr = (
+            "{" + repr(list(self.keys())) + ":" + repr(list(self.values())) + "}"
+        )
         self.__hash = hash(self.__repr)
 
     def __repr__(self):
@@ -113,8 +116,8 @@ def dumps_indented(obj, indent=1):
     Also, allows to use something different from RequestsLibrary.To_Json
 
     """
-    pretty_json = _json.dumps(obj, separators=(',', ': '), indent=indent)
-    return pretty_json + '\n'  # to avoid diff "no newline" warning line
+    pretty_json = _json.dumps(obj, separators=(",", ": "), indent=indent)
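+    # e.g. (illustrative): dumps_indented({"a": 1}) yields '{\n "a": 1\n}\n'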
+    return pretty_json + "\n"  # to avoid diff "no newline" warning line
 
 
 def sort_bits(obj, keys_with_bits=[]):
@@ -167,7 +170,9 @@ def hide_volatile(obj, keys_with_volatiles=[]):
             # Unicode is not str and vice versa, isinstance has to check for both.
             # Luckily, "in" recognizes equivalent strings in different encodings.
             # Type "bytes" is added for Python 3 compatibility.
-            if key in keys_with_volatiles and isinstance(value, (str, bytes, int, bool)):
+            if key in keys_with_volatiles and isinstance(
+                value, (str, bytes, int, bool)
+            ):
                 obj[key] = "*"
             else:
                 hide_volatile(value, keys_with_volatiles)
@@ -178,7 +183,14 @@ def hide_volatile(obj, keys_with_volatiles=[]):
     return obj
 
 
-def normalize_json_text(text, strict=False, indent=1, keys_with_bits=[], keys_with_volatiles=[], jmes_path=None):
+def normalize_json_text(
+    text,
+    strict=False,
+    indent=1,
+    keys_with_bits=[],
+    keys_with_volatiles=[],
+    jmes_path=None,
+):
     """
     Attempt to return sorted indented JSON string.
 
@@ -208,7 +220,7 @@ def normalize_json_text(text, strict=False, indent=1, keys_with_bits=[], keys_wi
         if strict:
             raise err
         else:
-            return str(err) + '\n' + text
+            return str(err) + "\n" + text
     if keys_with_bits:
         sort_bits(object_decoded, keys_with_bits)
     if keys_with_volatiles:
index b0725210888f6d88d1dec40f3a898e3f5a70ed36..c876fb425808aca233c4ea71e2aa4a4bf051417d 100644 (file)
@@ -9,55 +9,52 @@ def generate():
     BODY = {}
 
     ts = time.time()
-    formatted_ts = \
-        datetime.fromtimestamp(ts).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
-    BODY['@timestamp'] = formatted_ts
+    formatted_ts = datetime.fromtimestamp(ts).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+    BODY["@timestamp"] = formatted_ts
 
     # Plots are obtained from csv files (in the archives directory in $WORKSPACE).
 
-    csv_files = glob.glob('archives/*.csv')
-    BODY['project'] = 'opendaylight'
-    BODY['subject'] = 'test'
+    csv_files = glob.glob("archives/*.csv")
+    BODY["project"] = "opendaylight"
+    BODY["subject"] = "test"
 
     # If there are no csv files, then it is a functional test.
     # Parse csv files and fill performance parameter values
 
     if len(csv_files) == 0:
-        BODY['test-type'] = 'functional'
+        BODY["test-type"] = "functional"
     else:
-        BODY['test-type'] = 'performance'
-        BODY['plots'] = {}
+        BODY["test-type"] = "performance"
+        BODY["plots"] = {}
         for f in csv_files:
-            key = (f.split('/')[-1])[:-4]
-            BODY['plots'][key] = {}
+            key = (f.split("/")[-1])[:-4]
+            BODY["plots"][key] = {}
             with open(f) as file:
                 lines = file.readlines()
-            props = lines[0].strip().split(',')
-            vals = lines[1].strip().split(',')
+            props = lines[0].strip().split(",")
+            vals = lines[1].strip().split(",")
             for i in range(len(props)):
-                BODY['plots'][key][props[i]] = float(vals[i])
+                BODY["plots"][key][props[i]] = float(vals[i])
 
     # Fill the required parameters whose values are obtained from environment.
 
-    BODY['jenkins-silo'] = os.environ['SILO']
-    BODY['test-name'] = os.environ['JOB_NAME']
-    BODY['test-run'] = int(os.environ['BUILD_NUMBER'])
+    BODY["jenkins-silo"] = os.environ["SILO"]
+    BODY["test-name"] = os.environ["JOB_NAME"]
+    BODY["test-run"] = int(os.environ["BUILD_NUMBER"])
 
     # Parsing robot log for stats on start-time, pass/fail tests and duration.
 
-    robot_log = os.environ['WORKSPACE'] + '/output.xml'
+    robot_log = os.environ["WORKSPACE"] + "/output.xml"
     tree = ET.parse(robot_log)
-    BODY['id'] = '{}-{}'.format(os.environ['JOB_NAME'],
-                                os.environ['BUILD_NUMBER'])
-    BODY['start-time'] = tree.getroot().attrib['generated']
-    BODY['pass-tests'] = int(tree.getroot().find('statistics')
-                             [0][1].get('pass'))
-    BODY['fail-tests'] = int(tree.getroot().find('statistics')
-                             [0][1].get('fail'))
-    endtime = tree.getroot().find('suite').find('status').get('endtime')
-    starttime = tree.getroot().find('suite').find('status').get('starttime')
-    elap_time = datetime.strptime(endtime, '%Y%m%d %H:%M:%S.%f') \
-        - datetime.strptime(starttime, '%Y%m%d %H:%M:%S.%f')
-    BODY['duration'] = str(elap_time)
+    BODY["id"] = "{}-{}".format(os.environ["JOB_NAME"], os.environ["BUILD_NUMBER"])
+    BODY["start-time"] = tree.getroot().attrib["generated"]
+    BODY["pass-tests"] = int(tree.getroot().find("statistics")[0][1].get("pass"))
+    BODY["fail-tests"] = int(tree.getroot().find("statistics")[0][1].get("fail"))
+    endtime = tree.getroot().find("suite").find("status").get("endtime")
+    starttime = tree.getroot().find("suite").find("status").get("starttime")
+    elap_time = datetime.strptime(endtime, "%Y%m%d %H:%M:%S.%f") - datetime.strptime(
+        starttime, "%Y%m%d %H:%M:%S.%f"
+    )
+    BODY["duration"] = str(elap_time)
 
     return BODY
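+# Example of the resulting document (values illustrative):
+#   {"@timestamp": "2020-03-27T19:11:25.000000Z", "project": "opendaylight",
+#    "subject": "test", "test-type": "functional", "id": "myjob-42", ...}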
index b94ebba774491ede24950596228c6444567f118d..bc57626236e474835263849c234c217dfd37a448 100644 (file)
@@ -22,17 +22,11 @@ def p(x):
 class panelsJSON:
     def __init__(self):
         self.content = {
-            'gridData': {
-                'h': None,
-                'i': None,
-                'w': None,
-                'x': None,
-                'y': None
-            },
-            'id': None,
-            'panelIndex': None,
-            'type': 'visualization',
-            'version': '6.2.4'
+            "gridData": {"h": None, "i": None, "w": None, "x": None, "y": None},
+            "id": None,
+            "panelIndex": None,
+            "type": "visualization",
+            "version": "6.2.4",
         }
 
         self.counter = 0
@@ -40,25 +34,25 @@ class panelsJSON:
     def create(self, co_ords, id):
         self.counter += 1
         temp = copy.deepcopy(self.content)
-        temp['gridData']['h'] = co_ords['h']
-        temp['gridData']['i'] = str(self.counter)
-        temp['gridData']['w'] = co_ords['w']
-        temp['gridData']['x'] = co_ords['x']
-        temp['gridData']['y'] = co_ords['y']
+        temp["gridData"]["h"] = co_ords["h"]
+        temp["gridData"]["i"] = str(self.counter)
+        temp["gridData"]["w"] = co_ords["w"]
+        temp["gridData"]["x"] = co_ords["x"]
+        temp["gridData"]["y"] = co_ords["y"]
 
-        temp['id'] = id
-        temp['panelIndex'] = str(self.counter)
+        temp["id"] = id
+        temp["panelIndex"] = str(self.counter)
 
         return temp
 
 
 def generate(viz_config):
     dash = panelsJSON()
-    viz = [dash.create(i['co_ords'], i['id']) for _, i in viz_config.items()]
+    viz = [dash.create(i["co_ords"], i["id"]) for _, i in viz_config.items()]
     return viz
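+# Each viz entry in dashboard.yaml is expected to carry an "id" and a
+# "co_ords" mapping with h/w/x/y keys, e.g. (illustrative):
+#   viz: {v1: {id: pass-fail-trend, co_ords: {h: 12, w: 24, x: 0, y: 0}}}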
 
 
-if __name__ == '__main__':
-    with open('dashboard.yaml', 'r') as f:
+if __name__ == "__main__":
+    with open("dashboard.yaml", "r") as f:
         config = yaml.safe_load(f)
-        p(generate(config['dashboard']['viz']))
+        p(generate(config["dashboard"]["viz"]))
index c8a78d82a9ceabe2d313e6e9af4c8bb29883c929..8a709597821f68a7468b5a550b5b93ace46cfbad 100644 (file)
@@ -1,18 +1,16 @@
 from copy import deepcopy as dc
 
 # Template for search source format
-SEARCH_SOURCE_FORMAT = {"index": None, "filter": [],
-                        "query": {"language": "lucene", "query": ""}}
+SEARCH_SOURCE_FORMAT = {
+    "index": None,
+    "filter": [],
+    "query": {"language": "lucene", "query": ""},
+}
 
 # Template for filter format
 FILTER_FORMAT = {
     "query": {
-        "match": {
-            "placeholder_field": {
-                "query": "query_phrase",
-                "type": "phrase"
-            }
-        }
+        "match": {"placeholder_field": {"query": "query_phrase", "type": "phrase"}}
     }
 }
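+# generate() below deep-copies FILTER_FORMAT and swaps "placeholder_field" for
+# the configured field, so a config entry such as (illustrative)
+#   {"field": "test-name", "match-with": "netvirt-csit"}
+# becomes {"query": {"match": {"test-name": {"query": "netvirt-csit", "type": "phrase"}}}}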
 
@@ -30,39 +28,37 @@ def generate(dash_config, viz_config, index_pattern):
     #           match-with: pattern
 
     try:
-        filters = dash_config['filter']
+        filters = dash_config["filter"]
         for _, value in filters.items():
             try:
                 temp = dc(FILTER_FORMAT)
-                temp['query']['match'][value['field']
-                                       ] = \
-                    temp['query']['match']['placeholder_field']
-                temp['query']['match'][value['field']
-                                       ]['query'] = value['match-with']
-                del temp['query']['match']['placeholder_field']
-                search_source['filter'].append(temp)
+                temp["query"]["match"][value["field"]] = temp["query"]["match"][
+                    "placeholder_field"
+                ]
+                temp["query"]["match"][value["field"]]["query"] = value["match-with"]
+                del temp["query"]["match"]["placeholder_field"]
+                search_source["filter"].append(temp)
             except KeyError:
                 continue
     except KeyError:
         pass
 
     try:
-        filters = viz_config['filter']
+        filters = viz_config["filter"]
         for _, value in filters.items():
             try:
                 temp = dc(FILTER_FORMAT)
-                temp['query']['match'][value['field']
-                                       ] = \
-                    temp['query']['match']['placeholder_field']
-                temp['query']['match'][value['field']
-                                       ]['query'] = value['match-with']
-                del temp['query']['match']['placeholder_field']
-                search_source['filter'].append(temp)
+                temp["query"]["match"][value["field"]] = temp["query"]["match"][
+                    "placeholder_field"
+                ]
+                temp["query"]["match"][value["field"]]["query"] = value["match-with"]
+                del temp["query"]["match"]["placeholder_field"]
+                search_source["filter"].append(temp)
             except KeyError:
                 continue
     except KeyError:
         pass
 
-    search_source['index'] = index_pattern
+    search_source["index"] = index_pattern
 
     return search_source
index e99da6924890a3c7f9565ad321a235a3125bdc21..bffa0305f8a88ec19fc99f8312d832ed1caa07e7 100644 (file)
@@ -1,9 +1,5 @@
 # Template for UIState (Currently supports only colors)
-UI_STATE_BODY = {
-    "vis": {
-        "colors": None
-    }
-}
+UI_STATE_BODY = {"vis": {"colors": None}}
 
 
 def generate(dash_config, viz_config):
@@ -17,45 +13,45 @@ def generate(dash_config, viz_config):
     # and avoids duplication
 
     try:
-        series = dash_config['y-axis']['series']
+        series = dash_config["y-axis"]["series"]
         for _, value in series.items():
             try:
-                colors[value['label']] = value['color']
+                colors[value["label"]] = value["color"]
             except KeyError:
                 continue
     except KeyError:
         pass
 
     try:
-        series = viz_config['series']
+        series = viz_config["series"]
         for _, value in series.items():
             try:
-                colors[value['label']] = value['color']
+                colors[value["label"]] = value["color"]
             except KeyError:
                 continue
     except KeyError:
         pass
 
     try:
-        seriesParams = dash_config['y-axis']['seriesParams']
+        seriesParams = dash_config["y-axis"]["seriesParams"]
         for _, value in seriesParams.items():
             try:
-                colors[value['label']] = value['color']
+                colors[value["label"]] = value["color"]
             except KeyError:
                 continue
     except KeyError:
         pass
 
     try:
-        seriesParams = viz_config['seriesParams']
+        seriesParams = viz_config["seriesParams"]
         for _, value in seriesParams.items():
             try:
-                colors[value['label']] = value['color']
+                colors[value["label"]] = value["color"]
             except KeyError:
                 continue
     except KeyError:
         pass
 
-    UI_STATE_BODY['vis']['colors'] = colors
+    UI_STATE_BODY["vis"]["colors"] = colors
 
     return UI_STATE_BODY
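
One behavioral note on the function above: it mutates the module-level
UI_STATE_BODY, so successive calls share and overwrite the same dict. A
side-effect-free sketch of the same logic, assuming the same template:

    def generate_ui_state(colors):
        body = {"vis": {"colors": None}}  # fresh template per call
        body["vis"]["colors"] = colors
        return body
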
index b3327881b5c2f75453969f6b9ea2dfb33c631b85..f8d1cd269f66e13892f508d7e2cb9e18c3136e42 100644 (file)
@@ -23,60 +23,59 @@ class visState:
     # visState template
     def __init__(self):
         self.content = {
-            'title': None,
-            'type': None,
-            'params': {
-                'type': None,
-                'grid': {
-                    'categoryLines': False,
-                    'style': {
-                        'color': '#eee'
-                    }
-                },
-                'categoryAxes': None,
-                'valueAxes': None,
-                'seriesParams': None,
-                'addTooltip': True,
-                'addLegend': True,
-                'legendPosition': 'right',
-                'times': [],
-                'addTimeMarker': False
+            "title": None,
+            "type": None,
+            "params": {
+                "type": None,
+                "grid": {"categoryLines": False, "style": {"color": "#eee"}},
+                "categoryAxes": None,
+                "valueAxes": None,
+                "seriesParams": None,
+                "addTooltip": True,
+                "addLegend": True,
+                "legendPosition": "right",
+                "times": [],
+                "addTimeMarker": False,
             },
-            'aggs': None
+            "aggs": None,
         }
 
     def create(self, config):
         temp = self.content
-        temp['title'] = config['title']
-        temp['type'] = temp['params']['type'] = config['type']
+        temp["title"] = config["title"]
+        temp["type"] = temp["params"]["type"] = config["type"]
 
         cat = categoryAxes()
-        temp['params']['categoryAxes'] = [dc(
-            cat.create()) for i in range(config['num_cat_axes'])]
+        temp["params"]["categoryAxes"] = [
+            dc(cat.create()) for i in range(config["num_cat_axes"])
+        ]
 
         val = ValueAxes()
-        temp['params']['valueAxes'] = [dc(val.create(position=i['position'],
-                                                     title=i['title']))
-                                       for _, i in
-                                       config['value_axes'].items()]
+        temp["params"]["valueAxes"] = [
+            dc(val.create(position=i["position"], title=i["title"]))
+            for _, i in config["value_axes"].items()
+        ]
 
         agg = aggs()
 
-        temp['aggs'] = \
-            [dc(agg.create(id=i,
-                           field=config['aggs'][i]['field'],
-                           custom_label=config['aggs'][i]['custom_label'],
-                           schema=config['aggs'][i]['schema']))
-             for i in range(1, len(config['aggs']) + 1)]
-
-        temp['params']['seriesParams'] = [seriesParams(i['data_type'],
-                                                       i['mode'],
-                                                       i['label'],
-                                                       i['agg_id'],
-                                                       i['value_axis'])
-                                          .create()
-                                          for _, i in
-                                          config['seriesParams'].items()]
+        temp["aggs"] = [
+            dc(
+                agg.create(
+                    id=i,
+                    field=config["aggs"][i]["field"],
+                    custom_label=config["aggs"][i]["custom_label"],
+                    schema=config["aggs"][i]["schema"],
+                )
+            )
+            for i in range(1, len(config["aggs"]) + 1)
+        ]
+
+        temp["params"]["seriesParams"] = [
+            seriesParams(
+                i["data_type"], i["mode"], i["label"], i["agg_id"], i["value_axis"]
+            ).create()
+            for _, i in config["seriesParams"].items()
+        ]
 
         return temp
 
@@ -84,19 +83,14 @@ class visState:
 class categoryAxes:
     def __init__(self):
         self.content = {
-            'id': None,
-            'type': 'category',
-            'position': 'bottom',
-            'show': True,
-            'style': {},
-            'scale': {
-                'type': 'linear'
-            },
-            'labels': {
-                'show': True,
-                'truncate': 100
-            },
-            'title': {}
+            "id": None,
+            "type": "category",
+            "position": "bottom",
+            "show": True,
+            "style": {},
+            "scale": {"type": "linear"},
+            "labels": {"show": True, "truncate": 100},
+            "title": {},
         }
         self.counter = 0
 
@@ -104,52 +98,43 @@ class categoryAxes:
     def create(self):
         self.counter += 1
         temp = dc(self.content)
-        temp['id'] = 'CategoryAxis-{}'.format(self.counter)
+        temp["id"] = "CategoryAxis-{}".format(self.counter)
         return temp
 
 
 class ValueAxes:
     def __init__(self):
         self.content = {
-            'id': None,
-            'name': None,
-            'type': 'value',
-            'position': 'left',
-            'show': True,
-            'style': {},
-            'scale': {
-                'type': 'linear',
-                'mode': 'normal'
-            },
-            'labels': {
-                'show': True,
-                'rotate': 0,
-                'filter': False,
-                'truncate': 100
-            },
-            'title': {
-                'text': None
-            }
+            "id": None,
+            "name": None,
+            "type": "value",
+            "position": "left",
+            "show": True,
+            "style": {},
+            "scale": {"type": "linear", "mode": "normal"},
+            "labels": {"show": True, "rotate": 0, "filter": False, "truncate": 100},
+            "title": {"text": None},
         }
         self.counter = 0
 
-    def create(self, position='left', title='Value'):
+    def create(self, position="left", title="Value"):
         self.counter += 1
         temp = dc(self.content)
-        temp['id'] = 'ValueAxis-{}'.format(self.counter)
-        if position == 'left':
-            temp['name'] = 'LeftAxis-{}'.format(self.counter)
-        elif position == 'right':
-            temp['name'] = 'RightAxis-{}'.format(self.counter)
+        temp["id"] = "ValueAxis-{}".format(self.counter)
+        if position == "left":
+            temp["name"] = "LeftAxis-{}".format(self.counter)
+        elif position == "right":
+            temp["name"] = "RightAxis-{}".format(self.counter)
         else:
             # raise ValueError('Not one of left or right')
             # neither 'left' nor 'right' was given; assume the default ('left')
-            temp['name'] = 'LeftAxis-{}'.format(self.counter)
+            temp["name"] = "LeftAxis-{}".format(self.counter)
 
-        temp['title']['text'] = title
+        temp["title"]["text"] = title
 
         return temp
 
+
 # 'seriesParams' are the ones that actually show up in the plots.
 # They point to a data source, a.k.a. 'aggs' (short for 'aggregations'),
 # to get their data.
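
For illustration (the type, mode and label values here are hypothetical), a
seriesParams entry produced by the class below points at an aggregation purely
by its id:

    seriesParams("line", "normal", "rate", 1, 1).create()
    # {"show": True, "type": "line", "mode": "normal",
    #  "data": {"label": "rate", "id": "1"},   # "1" = id of agg number 1
    #  "valueAxis": "ValueAxis-1",
    #  "drawLinesBetweenPoints": True, "showCircles": True}
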
@@ -158,21 +143,22 @@ class ValueAxes:
 class seriesParams:
     def __init__(self, data_type, mode, label, agg_id, value_axis):
         self.content = {
-            'show': True,
-            'type': data_type,
-            'mode': mode,
-            'data': {
-                'label': label,
-                'id': str(agg_id)  # the id of the aggregation they point to
+            "show": True,
+            "type": data_type,
+            "mode": mode,
+            "data": {
+                "label": label,
+                "id": str(agg_id),  # the id of the aggregation they point to
             },
-            'valueAxis': 'ValueAxis-{}'.format(value_axis),
-            'drawLinesBetweenPoints': True,
-            'showCircles': True
+            "valueAxis": "ValueAxis-{}".format(value_axis),
+            "drawLinesBetweenPoints": True,
+            "showCircles": True,
         }
 
     def create(self):
         return self.content
 
+
 # 'aggs', or aggregations, refer to collections of values. They are the data
 # sources used by seriesParams, and as expected they take 'field'
 # as the nested name of the key.
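
Again for illustration (the field and label are hypothetical), the create()
method below fills the template and picks the aggregation type from the schema:

    aggs().create(id=1, field="pass-tests", custom_label="Passed", schema="metric")
    # {"id": 1, "enabled": True, "type": "max", "schema": "metric",
    #  "params": {"field": "pass-tests", "customLabel": "Passed"}}
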
@@ -192,31 +178,28 @@ class seriesParams:
 class aggs:
     def __init__(self):
         self.content = {
-            'id': None,
-            'enabled': True,
-            'type': None,
-            'schema': None,
-            'params': {
-                'field': None,
-                'customLabel': None
-            }
+            "id": None,
+            "enabled": True,
+            "type": None,
+            "schema": None,
+            "params": {"field": None, "customLabel": None},
         }
         self.counter = 0
 
     def create(self, id, field, custom_label, schema):
         temp = dc(self.content)
-        temp['id'] = id
-        temp['params']['field'] = field
-        temp['params']['customLabel'] = custom_label
-        temp['schema'] = schema
-        if schema == 'metric':
-            temp['type'] = 'max'
+        temp["id"] = id
+        temp["params"]["field"] = field
+        temp["params"]["customLabel"] = custom_label
+        temp["schema"] = schema
+        if schema == "metric":
+            temp["type"] = "max"
             return temp
-        elif schema == 'segment':
-            temp['type'] = 'terms'
-            temp['params']['size'] = 20  # default
-            temp['params']['order'] = 'asc'
-            temp['params']['orderBy'] = '_term'
+        elif schema == "segment":
+            temp["type"] = "terms"
+            temp["params"]["size"] = 20  # default
+            temp["params"]["order"] = "asc"
+            temp["params"]["orderBy"] = "_term"
         return temp
 
 
@@ -230,6 +213,7 @@ class aggs:
 # be sent to Kibana. Hence, any error occurring on the visualization side
 # must first be checked by looking at the intermediate format.
 
+
 def generate(dash_config, viz_config):
 
     format = {
@@ -241,15 +225,10 @@ def generate(dash_config, viz_config):
         "id": None,
         "aggs": {},
         "title": None,
-        "num_cat_axes": None
+        "num_cat_axes": None,
     }
 
-    value_axes_format = {
-        "index": {
-            "position": None,
-            "title": None
-        }
-    }
+    value_axes_format = {"index": {"position": None, "title": None}}
 
     seriesParams_format = {
         "index": {
@@ -257,22 +236,22 @@ def generate(dash_config, viz_config):
             "data_type": None,
             "mode": None,
             "label": None,
-            "agg_id": None
+            "agg_id": None,
         }
     }
 
-    aggs_format = {
-        "index": {
-            "custom_label": None,
-            "field": None,
-            "schema": None
-        }
-    }
+    aggs_format = {"index": {"custom_label": None, "field": None, "schema": None}}
 
     # all general description must be present in either of the config files
     for config in [viz_config, dash_config]:
-        general_fields = ['type', 'index_pattern',
-                          'num_cat_axes', 'title', 'desc', 'id']
+        general_fields = [
+            "type",
+            "index_pattern",
+            "num_cat_axes",
+            "title",
+            "desc",
+            "id",
+        ]
         for i in general_fields:
             try:
                 format[i] = config[i]
@@ -280,60 +259,61 @@ def generate(dash_config, viz_config):
                 pass
 
     # setting any default values if available
-    mappings = {'value_axes': value_axes_format,
-                'seriesParams': seriesParams_format, 'aggs': aggs_format}
+    mappings = {
+        "value_axes": value_axes_format,
+        "seriesParams": seriesParams_format,
+        "aggs": aggs_format,
+    }
     for index, container in mappings.items():
         try:
-            default_values = viz_config[index]['default']
+            default_values = viz_config[index]["default"]
             for i in default_values:
-                container['index'][i] = default_values[i]
+                container["index"][i] = default_values[i]
         except Exception:
             pass
 
     ####################################################################
     # Extract 'value_axes', 'seriesParams' or 'aggs' if present in viz_config
     value_axes_counter = 1
-    for m in viz_config['value_axes']:
+    for m in viz_config["value_axes"]:
         if m != "default":
             temp = dc(value_axes_format)
-            temp[str(value_axes_counter)] = temp['index']
-            for i in ['position', 'title']:
+            temp[str(value_axes_counter)] = temp["index"]
+            for i in ["position", "title"]:
                 try:
-                    temp[str(value_axes_counter)
-                         ][i] = viz_config['value_axes'][m][i]
+                    temp[str(value_axes_counter)][i] = viz_config["value_axes"][m][i]
                 except KeyError:
                     pass
-            format['value_axes'].update(temp)
+            format["value_axes"].update(temp)
             value_axes_counter += 1
 
-    seriesParams_fields = ['value_axis',
-                           'data_type', 'mode', 'label', 'agg_id']
+    seriesParams_fields = ["value_axis", "data_type", "mode", "label", "agg_id"]
     try:
-        for m in viz_config['seriesParams']:
-            if m != 'default':
+        for m in viz_config["seriesParams"]:
+            if m != "default":
                 temp = dc(seriesParams_format)
-                temp[m] = temp['index']
+                temp[m] = temp["index"]
                 for i in seriesParams_fields:
                     try:
-                        temp[m][i] = viz_config['seriesParams'][m][i]
+                        temp[m][i] = viz_config["seriesParams"][m][i]
                     except KeyError:
                         pass
-                format['seriesParams'].update(temp)
+                format["seriesParams"].update(temp)
     except KeyError:
         pass
 
     agg_counter = 1
     try:
-        for m in viz_config['aggs']:
-            if m != 'default':
+        for m in viz_config["aggs"]:
+            if m != "default":
                 temp = dc(aggs_format)
-                temp[m] = temp['index']
-                for i in ['field', 'custom_label', 'schema']:
+                temp[m] = temp["index"]
+                for i in ["field", "custom_label", "schema"]:
                     try:
-                        temp[m][i] = viz_config['aggs'][m][i]
+                        temp[m][i] = viz_config["aggs"][m][i]
                     except KeyError:
                         pass
-                format['aggs'].update(temp)
+                format["aggs"].update(temp)
     except KeyError:
         pass
     ####################################################################
@@ -341,14 +321,14 @@ def generate(dash_config, viz_config):
     # collect 'series' from both the configs
     configs = []
     try:
-        viz_config['series']
+        viz_config["series"]
         configs.append(viz_config)
     except KeyError:
         pass
 
     try:
-        dash_config['y-axis']['series']
-        configs.append(dash_config['y-axis'])
+        dash_config["y-axis"]["series"]
+        configs.append(dash_config["y-axis"])
     except KeyError:
         pass
 
@@ -357,68 +337,65 @@ def generate(dash_config, viz_config):
     for config in configs:
         try:
             value_axes_counter = 1
-            for key in config['value_axes']:
+            for key in config["value_axes"]:
 
                 value_axes_temp = dc(value_axes_format)
-                value_axes_temp[str(value_axes_counter)
-                                ] = value_axes_temp['index']
+                value_axes_temp[str(value_axes_counter)] = value_axes_temp["index"]
 
-                for index in ['position', 'title']:
+                for index in ["position", "title"]:
                     try:
-                        value_axes_temp[str(
-                            value_axes_counter)][index] = \
-                            config['value_axes'][key][index]
+                        value_axes_temp[str(value_axes_counter)][index] = config[
+                            "value_axes"
+                        ][key][index]
                     except KeyError as e:
                         pass
-                format['value_axes'].update(value_axes_temp)
+                format["value_axes"].update(value_axes_temp)
                 value_axes_counter += 1
 
         except KeyError as e:
             pass
 
         try:
-            for key in config['series']:
+            for key in config["series"]:
                 try:
                     # check if this key is present or not
-                    config['series'][key]['not_in_seriesParams']
+                    config["series"][key]["not_in_seriesParams"]
                 except KeyError:
                     seriesParams_temp = dc(seriesParams_format)
-                    seriesParams_temp[key] = seriesParams_temp['index']
-                    for index in ['value_axis', 'data_type', 'mode', 'label']:
+                    seriesParams_temp[key] = seriesParams_temp["index"]
+                    for index in ["value_axis", "data_type", "mode", "label"]:
                         try:
-                            seriesParams_temp[key][index] = \
-                                config['series'][key][index]
+                            seriesParams_temp[key][index] = config["series"][key][index]
                         except KeyError as e:
                             pass
-                    seriesParams_temp[key]['agg_id'] = key
-                    format['seriesParams'].update(seriesParams_temp)
+                    seriesParams_temp[key]["agg_id"] = key
+                    format["seriesParams"].update(seriesParams_temp)
                 finally:
                     agg_temp = dc(aggs_format)
-                    agg_temp[key] = agg_temp['index']
-                    for index in ['field', 'schema']:
+                    agg_temp[key] = agg_temp["index"]
+                    for index in ["field", "schema"]:
                         try:
-                            agg_temp[key][index] = config['series'][key][index]
+                            agg_temp[key][index] = config["series"][key][index]
                         except KeyError as e:
                             pass
-                    agg_temp[key]['custom_label'] = \
-                        config['series'][key]['label']
-                    format['aggs'].update(agg_temp)
+                    agg_temp[key]["custom_label"] = config["series"][key]["label"]
+                    format["aggs"].update(agg_temp)
         except KeyError as e:
             print("required fields are empty!")
 
     ##########################################################################
 
     # to remove the default template index
-    for i in ['value_axes', 'seriesParams', 'aggs']:
+    for i in ["value_axes", "seriesParams", "aggs"]:
         try:
-            format[i].pop('index')
+            format[i].pop("index")
         except KeyError:
             # print("No default index found")
             pass
 
     missing = config_validator(format)
     if len(missing):
-        raise ValueError('Missing required field values :-', *missing)
+        raise ValueError("Missing required field values :-", *missing)
 
     p(format)
 
@@ -430,13 +407,14 @@ def generate(dash_config, viz_config):
 
     missing = config_validator(generated_visState)
     if len(missing):
-        raise ValueError('required fields are missing values! ', *missing)
+        raise ValueError("required fields are missing values! ", *missing)
     return format, generated_visState
 
 
 # Check whether the generated format contains any key with None
 # as its value, which indicates incomplete information
 
+
 def config_validator(val, missing=[]):
     for key, value in val.items():
         if isinstance(value, dict):
@@ -446,14 +424,15 @@ def config_validator(val, missing=[]):
     return missing
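
One caveat: config_validator takes a mutable default argument (missing=[]),
which Python evaluates once at definition time, so the list accumulates across
calls. A conventional fix, sketched under the assumption that the elided body
recurses into nested dicts and records keys whose value is None:

    def config_validator(val, missing=None):
        if missing is None:
            missing = []
        for key, value in val.items():
            if isinstance(value, dict):
                config_validator(value, missing)
            elif value is None:
                missing.append(key)
        return missing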
 
 
-if __name__ == '__main__':
-    with open('viz_config.yaml', 'r') as f:
+if __name__ == "__main__":
+    with open("viz_config.yaml", "r") as f:
         viz_config = yaml.safe_load(f)
 
-    with open('dash_config.yaml', 'r') as f:
+    with open("dash_config.yaml", "r") as f:
         dash_config = yaml.safe_load(f)
 
-    generate(dash_config['dashboard']['viz'][2],
-             viz_config['opendaylight-test-performance'])
+    generate(
+        dash_config["dashboard"]["viz"][2], viz_config["opendaylight-test-performance"]
+    )
     # generate(dash_config['dashboard']['viz'][3],viz_config['opendaylight-test-performance'])
     # generate(dash_config['dashboard']['viz'][1],viz_config['opendaylight-test-feature'])
index 5120ad5447131afe377f7ce97d1a5765c73a71d7..e7e6167dae454575218c02e284bc0ac65fdefb08 100755 (executable)
@@ -43,22 +43,22 @@ def p(x):
 
 # ELK DB host and port to be passed as ':' separated argument
 if len(sys.argv) > 1:
-    if ':' in sys.argv[1]:
-        ELK_DB_HOST = sys.argv[1].split(':')[0]
-        ELK_DB_PORT = sys.argv[1].split(':')[1]
+    if ":" in sys.argv[1]:
+        ELK_DB_HOST = sys.argv[1].split(":")[0]
+        ELK_DB_PORT = sys.argv[1].split(":")[1]
 else:
-    print('Usage: python push_to_elk.py host:port')
-    print('Unable to publish data to ELK. Exiting.')
+    print("Usage: python push_to_elk.py host:port")
+    print("Unable to publish data to ELK. Exiting.")
     sys.exit()
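
Note that when sys.argv[1] is present but contains no ':', the script falls
through this block with ELK_DB_HOST and ELK_DB_PORT never assigned, and fails
later with a NameError. A minimal defensive sketch:

    if len(sys.argv) < 2 or ":" not in sys.argv[1]:
        print("Usage: python push_to_elk.py host:port")
        sys.exit(1)
    ELK_DB_HOST, _, ELK_DB_PORT = sys.argv[1].partition(":")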
 
 try:
     es = Elasticsearch(
-        hosts=[{'host': ELK_DB_HOST, 'port': int(ELK_DB_PORT)}],
-        scheme='https',
-        connection_class=RequestsHttpConnection
+        hosts=[{"host": ELK_DB_HOST, "port": int(ELK_DB_PORT)}],
+        scheme="https",
+        connection_class=RequestsHttpConnection,
     )
 except Exception as e:
-    print('Unexpected Error Occurred. Exiting')
+    print("Unexpected Error Occurred. Exiting")
     print(e)
 # print(es.info())
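
Similarly, if the Elasticsearch constructor raises, the except branch above
only prints and execution continues with es unbound, so the next es.* call
raises a NameError. A sketch that fails fast instead:

    except Exception as e:
        print("Unexpected Error Occurred. Exiting")
        print(e)
        sys.exit(1)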
 
@@ -70,158 +70,153 @@ except Exception as e:
 
 def JSONToString(jobj):
     retval = str(jobj)
-    retval = retval.replace('\'', '"')
-    retval = retval.replace(': ', ':')
-    retval = retval.replace(', ', ',')
-    retval = retval.replace('True', 'true')
-    retval = retval.replace('False', 'false')
-    retval = retval.replace('None', 'null')
+    retval = retval.replace("'", '"')
+    retval = retval.replace(": ", ":")
+    retval = retval.replace(", ", ",")
+    retval = retval.replace("True", "true")
+    retval = retval.replace("False", "false")
+    retval = retval.replace("None", "null")
     return retval
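
JSONToString rewrites Python repr output into JSON via string replacement,
which breaks as soon as a value itself contains a quote, a ', ' or a ': '.
Assuming everything passed to it in this script is JSON-serializable, json.dumps
would be a more robust equivalent producing the same compact separators:

    def JSONToString(jobj):
        return json.dumps(jobj, separators=(",", ":"))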
 
 
 # Clear .kibana index before pushing visualizations
 try:
-    index = '.kibana'
+    index = ".kibana"
     res = es.indices.delete(index=index)
 except Exception as e:
     print(e)
     # raise e
-    print('Unable to push data to ElasticSearch')
+    print("Unable to push data to ElasticSearch")
 
 
 # Create and push index-pattern to be used by visualizations
 
-TEST_DATA_INDEX = 'opendaylight-test'
+TEST_DATA_INDEX = "opendaylight-test"
 
 INDEX_PATTERN_BODY = {
     "type": "index-pattern",
-    "index-pattern": {
-        "timeFieldName": "@timestamp",
-        "title": TEST_DATA_INDEX
-    }
+    "index-pattern": {"timeFieldName": "@timestamp", "title": TEST_DATA_INDEX},
 }
 
 
-KIBANA_CONFIG = {'config': {
-    'defaultIndex': 'pattern-for-{}'.format(TEST_DATA_INDEX),
-    'timepicker:timeDefaults': '{\n  "from": "now-5y",\n \
+KIBANA_CONFIG = {
+    "config": {
+        "defaultIndex": "pattern-for-{}".format(TEST_DATA_INDEX),
+        "timepicker:timeDefaults": '{\n  "from": "now-5y",\n \
                                 "to": "now",\n  "mode": "quick"\n}',
-    'xPackMonitoring:showBanner': False},
-    'type': 'config',
+        "xPackMonitoring:showBanner": False,
+    },
+    "type": "config",
 }
 
-res = es.index(index='.kibana', doc_type='doc',
-               id='config:6.2.4', body=KIBANA_CONFIG)
+res = es.index(index=".kibana", doc_type="doc", id="config:6.2.4", body=KIBANA_CONFIG)
 
 
 try:
-    index = '.kibana'
-    ES_ID = 'index-pattern:pattern-for-{}'.format(
-        TEST_DATA_INDEX)
-    res = es.index(index=index, doc_type='doc',
-                   id=ES_ID, body=INDEX_PATTERN_BODY)
+    index = ".kibana"
+    ES_ID = "index-pattern:pattern-for-{}".format(TEST_DATA_INDEX)
+    res = es.index(index=index, doc_type="doc", id=ES_ID, body=INDEX_PATTERN_BODY)
     p(json.dumps(INDEX_PATTERN_BODY, indent=4))
     print(json.dumps(res, indent=4))
 except Exception as e:
     print(e)
     # raise e
-    print('Unable to push data to ElasticSearch')
+    print("Unable to push data to ElasticSearch")
 
 try:
-    viz_config_path = glob.glob('**/dashboard/viz_config.yaml')[0]
+    viz_config_path = glob.glob("**/dashboard/viz_config.yaml")[0]
 except IndexError:
-    print('Visualization template file not found!')
+    print("Visualization template file not found!")
     sys.exit()
 
 try:
-    dash_config_path = glob.glob('**/dashboard/dash_config.yaml')[0]
+    dash_config_path = glob.glob("**/dashboard/dash_config.yaml")[0]
 except IndexError:
-    print('Dashboard configuration file not found!')
+    print("Dashboard configuration file not found!")
     sys.exit()
 
-with open(dash_config_path, 'r') as f:
+with open(dash_config_path, "r") as f:
     dash_config = yaml.safe_load(f)
 
-with open(viz_config_path, 'r') as f:
+with open(viz_config_path, "r") as f:
     viz_config = yaml.safe_load(f)
 
 
 # Create and push visualizations
 for dashboard_id, dashboard_content in dash_config.items():
 
-    for _, i in dash_config[dashboard_id]['viz'].items():
+    for _, i in dash_config[dashboard_id]["viz"].items():
         intermediate_format, visState = vis_gen.generate(
-            i, viz_config[i['viz-template']])
+            i, viz_config[i["viz-template"]]
+        )
 
         searchSourceJSON = searchSourceJSON_gen.generate(
-            i, viz_config[i['viz-template']],
-            intermediate_format['index_pattern'])
+            i, viz_config[i["viz-template"]], intermediate_format["index_pattern"]
+        )
 
-        uiStateJSON = uiStateJSON_gen.generate(
-            i, viz_config[i['viz-template']])
+        uiStateJSON = uiStateJSON_gen.generate(i, viz_config[i["viz-template"]])
 
         # p(intermediate_format)
         # p(visState)
 
         # Template for visualization template
         VIZ_BODY = {
-            'type': 'visualization',
-            'visualization': {
+            "type": "visualization",
+            "visualization": {
                 "title": None,
                 "visState": None,
                 "uiStateJSON": "{}",
                 "description": None,
                 "version": 1,
-                "kibanaSavedObjectMeta": {
-                    "searchSourceJSON": None
-                }
-            }
+                "kibanaSavedObjectMeta": {"searchSourceJSON": None},
+            },
         }
 
-        VIZ_BODY['visualization']['title'] = intermediate_format['title']
-        VIZ_BODY['visualization']['visState'] = JSONToString(visState)
-        VIZ_BODY['visualization']['uiStateJSON'] = JSONToString(uiStateJSON)
-        VIZ_BODY['visualization']['description'] = intermediate_format['desc']
-        VIZ_BODY['visualization']['kibanaSavedObjectMeta']['searchSourceJSON']\
-            = JSONToString(
-            searchSourceJSON)
+        VIZ_BODY["visualization"]["title"] = intermediate_format["title"]
+        VIZ_BODY["visualization"]["visState"] = JSONToString(visState)
+        VIZ_BODY["visualization"]["uiStateJSON"] = JSONToString(uiStateJSON)
+        VIZ_BODY["visualization"]["description"] = intermediate_format["desc"]
+        VIZ_BODY["visualization"]["kibanaSavedObjectMeta"][
+            "searchSourceJSON"
+        ] = JSONToString(searchSourceJSON)
 
         p(VIZ_BODY)
         # Pushing visualization to Kibana
-        index = '.kibana'
-        ES_ID = 'visualization:{}'.format(i['id'])
-        res = es.index(index=index, doc_type='doc', id=ES_ID, body=VIZ_BODY)
+        index = ".kibana"
+        ES_ID = "visualization:{}".format(i["id"])
+        res = es.index(index=index, doc_type="doc", id=ES_ID, body=VIZ_BODY)
         print(json.dumps(res, indent=4))
 
     # Create and push dashboards
 
     # Template for dashboard body in Kibana
     DASH_BODY = {
-        'type': 'dashboard',
-        'dashboard': {
-            'title': None,
-            'description': None,
-            'panelsJSON': None,
-            'optionsJSON': '{\"darkTheme\":false,\
-                            \"hidePanelTitles\":false,\"useMargins\":true}',
-            'version': 1,
-            'kibanaSavedObjectMeta': {
-                'searchSourceJSON': '{\"query\":{\"language\":\"lucene\", \
-                                     \"query\":\"\"}, \
-                                     \"filter\":[],\"highlightAll\" \
-                                      :true,\"version\":true}'
-            }
-        }
+        "type": "dashboard",
+        "dashboard": {
+            "title": None,
+            "description": None,
+            "panelsJSON": None,
+            "optionsJSON": '{"darkTheme":false,\
+                            "hidePanelTitles":false,"useMargins":true}',
+            "version": 1,
+            "kibanaSavedObjectMeta": {
+                "searchSourceJSON": '{"query":{"language":"lucene", \
+                                     "query":""}, \
+                                     "filter":[],"highlightAll" \
+                                      :true,"version":true}'
+            },
+        },
     }
 
-    DASH_BODY['dashboard']['title'] = dashboard_content['title']
-    DASH_BODY['dashboard']['description'] = dashboard_content['desc']
-    DASH_BODY['dashboard']['panelsJSON'] = JSONToString(
-        dash_gen.generate(dashboard_content['viz']))
+    DASH_BODY["dashboard"]["title"] = dashboard_content["title"]
+    DASH_BODY["dashboard"]["description"] = dashboard_content["desc"]
+    DASH_BODY["dashboard"]["panelsJSON"] = JSONToString(
+        dash_gen.generate(dashboard_content["viz"])
+    )
 
     p(DASH_BODY)
     # Pushing dashboard to kibana
-    index = '.kibana'
-    ES_ID = 'dashboard:{}'.format(dashboard_content['id'])
-    res = es.index(index=index, doc_type='doc', id=ES_ID, body=DASH_BODY)
+    index = ".kibana"
+    ES_ID = "dashboard:{}".format(dashboard_content["id"])
+    res = es.index(index=index, doc_type="doc", id=ES_ID, body=DASH_BODY)
     print(json.dumps(res, indent=4))
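
A compatibility note: writing saved objects straight into the .kibana index
with doc_type="doc" matches the Kibana 6.x storage format targeted above (see
the config:6.2.4 id); Elasticsearch 7.x and later drop mapping types, so there
the doc_type argument would simply be omitted:

    res = es.index(index=".kibana", id=ES_ID, body=DASH_BODY)
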
index 976b2277e6f5388b923d02ff0dbe6f96abf079bd..2b3923303dc4c5e5f8daa7a05b20c44f4bc40816 100755 (executable)
@@ -61,16 +61,18 @@ import data_generate as data_gen
 
 def p(x):
     print(json.dumps(x, indent=6, sort_keys=True))
+
+
 # ELK DB host and port to be passed as ':' separated argument
 
 
 if len(sys.argv) > 1:
-    if ':' in sys.argv[1]:
-        ELK_DB_HOST = sys.argv[1].split(':')[0]
-        ELK_DB_PORT = sys.argv[1].split(':')[1]
+    if ":" in sys.argv[1]:
+        ELK_DB_HOST = sys.argv[1].split(":")[0]
+        ELK_DB_PORT = sys.argv[1].split(":")[1]
 else:
-    print('Usage: python push_to_elk.py host:port')
-    print('Unable to publish data to ELK. Exiting.')
+    print("Usage: python push_to_elk.py host:port")
+    print("Unable to publish data to ELK. Exiting.")
     sys.exit()
 
 # Construct json body
@@ -79,12 +81,12 @@ else:
 
 try:
     es = Elasticsearch(
-        hosts=[{'host': ELK_DB_HOST, 'port': int(ELK_DB_PORT)}],
-        scheme='https',
-        connection_class=RequestsHttpConnection
+        hosts=[{"host": ELK_DB_HOST, "port": int(ELK_DB_PORT)}],
+        scheme="https",
+        connection_class=RequestsHttpConnection,
     )
 except Exception as e:
-    print('Unexpected Error Occurred. Exiting')
+    print("Unexpected Error Occurred. Exiting")
     print(e)
 # print(es.info())
 
@@ -95,18 +97,16 @@ BODY = data_gen.generate()
 print(json.dumps(BODY, indent=4))
 
 # Skip ELK update if it comes from sandbox.
-if BODY['jenkins-silo'] == 'sandbox':
-    print('silo is sandbox, ELK update is skipped')
+if BODY["jenkins-silo"] == "sandbox":
+    print("silo is sandbox, ELK update is skipped")
     sys.exit()
 
 # Try to send request to ELK DB.
 try:
-    index = '{}-{}'.format(BODY['project'],
-                           BODY['subject'])
-    ES_ID = '{}:{}-{}'.format(BODY['test-type'], BODY['test-name'],
-                              BODY['test-run'])
-    res = es.index(index=index, doc_type='doc', id=ES_ID, body=BODY)
+    index = "{}-{}".format(BODY["project"], BODY["subject"])
+    ES_ID = "{}:{}-{}".format(BODY["test-type"], BODY["test-name"], BODY["test-run"])
+    res = es.index(index=index, doc_type="doc", id=ES_ID, body=BODY)
     print(json.dumps(res, indent=4))
 except Exception as e:
     print(e)
-    print('Unable to push data to ElasticSearch')
+    print("Unable to push data to ElasticSearch")
index 8503c8e5da571100efac8e9a6797be3aad728d65..24468e2e31552598c58e5e9fc08fdbf469945616 100755 (executable)
@@ -1,65 +1,67 @@
 # Config for switches, tunnelIP is the local IP address.
-switches = [{'name': 'sw1',
-             'type': 'gbp',
-             'dpid': '1'},
-            {'name': 'sw2',
-             'type': 'gbp',
-             'dpid': '2'},
-            {'name': 'sw3',
-             'type': 'gbp',
-             'dpid': '3'},
-            {'name': 'sw4',
-             'type': 'none',
-             'dpid': '4'},
-            {'name': 'sw5',
-             'type': 'none',
-             'dpid': '5'},
-            {'name': 'sw6',
-             'type': 'none',
-             'dpid': '6'},
-            {'name': 'sw7',
-             'type': 'none',
-             'dpid': '7'},
-            {'name': 'sw8',
-             'type': 'none',
-             'dpid': '8'}
-            ]
+switches = [
+    {"name": "sw1", "type": "gbp", "dpid": "1"},
+    {"name": "sw2", "type": "gbp", "dpid": "2"},
+    {"name": "sw3", "type": "gbp", "dpid": "3"},
+    {"name": "sw4", "type": "none", "dpid": "4"},
+    {"name": "sw5", "type": "none", "dpid": "5"},
+    {"name": "sw6", "type": "none", "dpid": "6"},
+    {"name": "sw7", "type": "none", "dpid": "7"},
+    {"name": "sw8", "type": "none", "dpid": "8"},
+]
 
-defaultContainerImage = 'alagalah/odlpoc_ovs230'
+defaultContainerImage = "alagalah/odlpoc_ovs230"
 # defaultContainerImage='ubuntu:14.04'
 
 # Note that tenant name and endpointGroup name come from policy_config.py
 
-hosts = [{'name': 'h35_2',
-          'mac': '00:00:00:00:35:02',
-          'ip': '10.0.35.2/24',
-          'switch': 'sw1'},
-         {'name': 'h35_3',
-          'ip': '10.0.35.3/24',
-          'mac': '00:00:00:00:35:03',
-          'switch': 'sw2'},
-         {'name': 'h35_4',
-          'ip': '10.0.35.4/24',
-          'mac': '00:00:00:00:35:04',
-          'switch': 'sw3'},
-         {'name': 'h35_5',
-          'ip': '10.0.35.5/24',
-          'mac': '00:00:00:00:35:05',
-          'switch': 'sw1'},
-         {'name': 'h36_2',
-          'ip': '10.0.36.2/24',
-          'mac': '00:00:00:00:36:02',
-          'switch': 'sw2'},
-         {'name': 'h36_3',
-          'ip': '10.0.36.3/24',
-          'mac': '00:00:00:00:36:03',
-          'switch': 'sw3'},
-         {'name': 'h36_4',
-          'ip': '10.0.36.4/24',
-          'mac': '00:00:00:00:36:04',
-          'switch': 'sw1'},
-         {'name': 'h36_5',
-          'ip': '10.0.36.5/24',
-          'mac': '00:00:00:00:36:05',
-          'switch': 'sw2'}
-         ]
+hosts = [
+    {
+        "name": "h35_2",
+        "mac": "00:00:00:00:35:02",
+        "ip": "10.0.35.2/24",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_3",
+        "ip": "10.0.35.3/24",
+        "mac": "00:00:00:00:35:03",
+        "switch": "sw2",
+    },
+    {
+        "name": "h35_4",
+        "ip": "10.0.35.4/24",
+        "mac": "00:00:00:00:35:04",
+        "switch": "sw3",
+    },
+    {
+        "name": "h35_5",
+        "ip": "10.0.35.5/24",
+        "mac": "00:00:00:00:35:05",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_2",
+        "ip": "10.0.36.2/24",
+        "mac": "00:00:00:00:36:02",
+        "switch": "sw2",
+    },
+    {
+        "name": "h36_3",
+        "ip": "10.0.36.3/24",
+        "mac": "00:00:00:00:36:03",
+        "switch": "sw3",
+    },
+    {
+        "name": "h36_4",
+        "ip": "10.0.36.4/24",
+        "mac": "00:00:00:00:36:04",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_5",
+        "ip": "10.0.36.5/24",
+        "mac": "00:00:00:00:36:05",
+        "switch": "sw2",
+    },
+]
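
These switch and host tables are plain data; the launch script later in this
patch selects hosts per switch by the "switch" key, e.g. (illustrative only):

    sw1_hosts = [h for h in hosts if h["switch"] == "sw1"]
    # -> h35_2, h35_5 and h36_4 in the table above
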
index a3a60183e90c0763498490b7f066d0fb0e614a2a..5d9f610006c4733054fde0f52faa732bf6385b3e 100755 (executable)
 # Config for switches, tunnelIP is the local IP address.
 switches = [
-    {'name': 'sw1',
-     'type': 'gbp',
-     'dpid': '1'},
-    {'name': 'sw2',
-     'type': 'gbp',
-     'dpid': '2'},
-    {'name': 'sw3',
-     'type': 'gbp',
-     'dpid': '3'}
+    {"name": "sw1", "type": "gbp", "dpid": "1"},
+    {"name": "sw2", "type": "gbp", "dpid": "2"},
+    {"name": "sw3", "type": "gbp", "dpid": "3"},
 ]
 
-defaultContainerImage = 'alagalah/odlpoc_ovs230'
+defaultContainerImage = "alagalah/odlpoc_ovs230"
 # defaultContainerImage='ubuntu:14.04'
 
 # Note that tenant name and endpointGroup name come from policy_config.py
 
-hosts = [{'name': 'h35_2',
-          'mac': '00:00:00:00:35:02',
-          'ip': '10.0.35.2/24',
-          'switch': 'sw1'},
-         {'name': 'h35_3',
-          'ip': '10.0.35.3/24',
-          'mac': '00:00:00:00:35:03',
-          'switch': 'sw2'},
-         {'name': 'h35_4',
-          'ip': '10.0.35.4/24',
-          'mac': '00:00:00:00:35:04',
-          'switch': 'sw3'},
-         {'name': 'h35_5',
-          'ip': '10.0.35.5/24',
-          'mac': '00:00:00:00:35:05',
-          'switch': 'sw1'},
-
-         {'name': 'h35_6',
-          'ip': '10.0.35.6/24',
-          'mac': '00:00:00:00:35:06',
-          'switch': 'sw2',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h35_7',
-          'ip': '10.0.35.7/24',
-          'mac': '00:00:00:00:35:07',
-          'switch': 'sw3',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h35_8',
-          'ip': '10.0.35.8/24',
-          'mac': '00:00:00:00:35:08',
-          'switch': 'sw1',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h35_9',
-          'ip': '10.0.35.9/24',
-          'mac': '00:00:00:00:35:09',
-          'switch': 'sw2',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-
-         {'name': 'h36_2',
-          'ip': '10.0.36.2/24',
-          'mac': '00:00:00:00:36:02',
-          'switch': 'sw3'},
-         {'name': 'h36_3',
-          'ip': '10.0.36.3/24',
-          'mac': '00:00:00:00:36:03',
-          'switch': 'sw1'},
-         {'name': 'h36_4',
-          'ip': '10.0.36.4/24',
-          'mac': '00:00:00:00:36:04',
-          'switch': 'sw2'},
-         {'name': 'h36_5',
-          'ip': '10.0.36.5/24',
-          'mac': '00:00:00:00:36:05',
-          'switch': 'sw3'},
-
-         {'name': 'h36_6',
-          'ip': '10.0.36.6/24',
-          'mac': '00:00:00:00:36:06',
-          'switch': 'sw1',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h36_7',
-          'ip': '10.0.36.7/24',
-          'mac': '00:00:00:00:36:07',
-          'switch': 'sw2',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h36_8',
-          'ip': '10.0.36.8/24',
-          'mac': '00:00:00:00:36:08',
-          'switch': 'sw3',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'},
-         {'name': 'h36_9',
-          'ip': '10.0.36.9/24',
-          'mac': '00:00:00:00:36:09',
-          'switch': 'sw1',
-          'tenant': 'GBPPOC2',
-          'endpointGroup': 'test'}]
+hosts = [
+    {
+        "name": "h35_2",
+        "mac": "00:00:00:00:35:02",
+        "ip": "10.0.35.2/24",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_3",
+        "ip": "10.0.35.3/24",
+        "mac": "00:00:00:00:35:03",
+        "switch": "sw2",
+    },
+    {
+        "name": "h35_4",
+        "ip": "10.0.35.4/24",
+        "mac": "00:00:00:00:35:04",
+        "switch": "sw3",
+    },
+    {
+        "name": "h35_5",
+        "ip": "10.0.35.5/24",
+        "mac": "00:00:00:00:35:05",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_6",
+        "ip": "10.0.35.6/24",
+        "mac": "00:00:00:00:35:06",
+        "switch": "sw2",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h35_7",
+        "ip": "10.0.35.7/24",
+        "mac": "00:00:00:00:35:07",
+        "switch": "sw3",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h35_8",
+        "ip": "10.0.35.8/24",
+        "mac": "00:00:00:00:35:08",
+        "switch": "sw1",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h35_9",
+        "ip": "10.0.35.9/24",
+        "mac": "00:00:00:00:35:09",
+        "switch": "sw2",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h36_2",
+        "ip": "10.0.36.2/24",
+        "mac": "00:00:00:00:36:02",
+        "switch": "sw3",
+    },
+    {
+        "name": "h36_3",
+        "ip": "10.0.36.3/24",
+        "mac": "00:00:00:00:36:03",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_4",
+        "ip": "10.0.36.4/24",
+        "mac": "00:00:00:00:36:04",
+        "switch": "sw2",
+    },
+    {
+        "name": "h36_5",
+        "ip": "10.0.36.5/24",
+        "mac": "00:00:00:00:36:05",
+        "switch": "sw3",
+    },
+    {
+        "name": "h36_6",
+        "ip": "10.0.36.6/24",
+        "mac": "00:00:00:00:36:06",
+        "switch": "sw1",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h36_7",
+        "ip": "10.0.36.7/24",
+        "mac": "00:00:00:00:36:07",
+        "switch": "sw2",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h36_8",
+        "ip": "10.0.36.8/24",
+        "mac": "00:00:00:00:36:08",
+        "switch": "sw3",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+    {
+        "name": "h36_9",
+        "ip": "10.0.36.9/24",
+        "mac": "00:00:00:00:36:09",
+        "switch": "sw1",
+        "tenant": "GBPPOC2",
+        "endpointGroup": "test",
+    },
+]
index 46a7deff2652e25fa76d3e8d293ee4153034ec87..f951b1d9c9f6dc2de3ddfd961507c2bf0af19a60 100644 (file)
@@ -1,64 +1,66 @@
 # Config for switches, tunnelIP is the local IP address.
-switches = [{'name': 'sw1',
-             'type': 'gbp',
-             'dpid': '1'},
-            {'name': 'sw2',
-             'type': 'sff',
-             'dpid': '2'},
-            {'name': 'sw3',
-             'type': 'sf',
-             'dpid': '3'},
-            {'name': 'sw4',
-             'type': 'sff',
-             'dpid': '4'},
-            {'name': 'sw5',
-             'type': 'sf',
-             'dpid': '5'},
-            {'name': 'sw6',
-             'type': 'gbp',
-             'dpid': '6'},
-            {'name': 'sw7',
-             'type': 'none',
-             'dpid': '7'},
-            {'name': 'sw8',
-             'type': 'none',
-             'dpid': '8'}
-            ]
+switches = [
+    {"name": "sw1", "type": "gbp", "dpid": "1"},
+    {"name": "sw2", "type": "sff", "dpid": "2"},
+    {"name": "sw3", "type": "sf", "dpid": "3"},
+    {"name": "sw4", "type": "sff", "dpid": "4"},
+    {"name": "sw5", "type": "sf", "dpid": "5"},
+    {"name": "sw6", "type": "gbp", "dpid": "6"},
+    {"name": "sw7", "type": "none", "dpid": "7"},
+    {"name": "sw8", "type": "none", "dpid": "8"},
+]
 
-defaultContainerImage = 'alagalah/odlpoc_ovs230'
+defaultContainerImage = "alagalah/odlpoc_ovs230"
 
 # Note that tenant name and endpointGroup name come from policy_config.py
 
-hosts = [{'name': 'h35_2',
-          'mac': '00:00:00:00:35:02',
-          'ip': '10.0.35.2/24',
-          'switch': 'sw1'},
-         {'name': 'h35_3',
-          'ip': '10.0.35.3/24',
-          'mac': '00:00:00:00:35:03',
-          'switch': 'sw1'},
-         {'name': 'h35_4',
-          'ip': '10.0.35.4/24',
-          'mac': '00:00:00:00:35:04',
-          'switch': 'sw6'},
-         {'name': 'h35_5',
-          'ip': '10.0.35.5/24',
-          'mac': '00:00:00:00:35:05',
-          'switch': 'sw6'},
-         {'name': 'h36_2',
-          'ip': '10.0.36.2/24',
-          'mac': '00:00:00:00:36:02',
-          'switch': 'sw1'},
-         {'name': 'h36_3',
-          'ip': '10.0.36.3/24',
-          'mac': '00:00:00:00:36:03',
-          'switch': 'sw1'},
-         {'name': 'h36_4',
-          'ip': '10.0.36.4/24',
-          'mac': '00:00:00:00:36:04',
-          'switch': 'sw6'},
-         {'name': 'h36_5',
-          'ip': '10.0.36.5/24',
-          'mac': '00:00:00:00:36:05',
-          'switch': 'sw6'}
-         ]
+hosts = [
+    {
+        "name": "h35_2",
+        "mac": "00:00:00:00:35:02",
+        "ip": "10.0.35.2/24",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_3",
+        "ip": "10.0.35.3/24",
+        "mac": "00:00:00:00:35:03",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_4",
+        "ip": "10.0.35.4/24",
+        "mac": "00:00:00:00:35:04",
+        "switch": "sw6",
+    },
+    {
+        "name": "h35_5",
+        "ip": "10.0.35.5/24",
+        "mac": "00:00:00:00:35:05",
+        "switch": "sw6",
+    },
+    {
+        "name": "h36_2",
+        "ip": "10.0.36.2/24",
+        "mac": "00:00:00:00:36:02",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_3",
+        "ip": "10.0.36.3/24",
+        "mac": "00:00:00:00:36:03",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_4",
+        "ip": "10.0.36.4/24",
+        "mac": "00:00:00:00:36:04",
+        "switch": "sw6",
+    },
+    {
+        "name": "h36_5",
+        "ip": "10.0.36.5/24",
+        "mac": "00:00:00:00:36:05",
+        "switch": "sw6",
+    },
+]
index 46a7deff2652e25fa76d3e8d293ee4153034ec87..f951b1d9c9f6dc2de3ddfd961507c2bf0af19a60 100644 (file)
@@ -1,64 +1,66 @@
 # Config for switches, tunnelIP is the local IP address.
-switches = [{'name': 'sw1',
-             'type': 'gbp',
-             'dpid': '1'},
-            {'name': 'sw2',
-             'type': 'sff',
-             'dpid': '2'},
-            {'name': 'sw3',
-             'type': 'sf',
-             'dpid': '3'},
-            {'name': 'sw4',
-             'type': 'sff',
-             'dpid': '4'},
-            {'name': 'sw5',
-             'type': 'sf',
-             'dpid': '5'},
-            {'name': 'sw6',
-             'type': 'gbp',
-             'dpid': '6'},
-            {'name': 'sw7',
-             'type': 'none',
-             'dpid': '7'},
-            {'name': 'sw8',
-             'type': 'none',
-             'dpid': '8'}
-            ]
+switches = [
+    {"name": "sw1", "type": "gbp", "dpid": "1"},
+    {"name": "sw2", "type": "sff", "dpid": "2"},
+    {"name": "sw3", "type": "sf", "dpid": "3"},
+    {"name": "sw4", "type": "sff", "dpid": "4"},
+    {"name": "sw5", "type": "sf", "dpid": "5"},
+    {"name": "sw6", "type": "gbp", "dpid": "6"},
+    {"name": "sw7", "type": "none", "dpid": "7"},
+    {"name": "sw8", "type": "none", "dpid": "8"},
+]
 
-defaultContainerImage = 'alagalah/odlpoc_ovs230'
+defaultContainerImage = "alagalah/odlpoc_ovs230"
 
 # Note that tenant name and endpointGroup name come from policy_config.py
 
-hosts = [{'name': 'h35_2',
-          'mac': '00:00:00:00:35:02',
-          'ip': '10.0.35.2/24',
-          'switch': 'sw1'},
-         {'name': 'h35_3',
-          'ip': '10.0.35.3/24',
-          'mac': '00:00:00:00:35:03',
-          'switch': 'sw1'},
-         {'name': 'h35_4',
-          'ip': '10.0.35.4/24',
-          'mac': '00:00:00:00:35:04',
-          'switch': 'sw6'},
-         {'name': 'h35_5',
-          'ip': '10.0.35.5/24',
-          'mac': '00:00:00:00:35:05',
-          'switch': 'sw6'},
-         {'name': 'h36_2',
-          'ip': '10.0.36.2/24',
-          'mac': '00:00:00:00:36:02',
-          'switch': 'sw1'},
-         {'name': 'h36_3',
-          'ip': '10.0.36.3/24',
-          'mac': '00:00:00:00:36:03',
-          'switch': 'sw1'},
-         {'name': 'h36_4',
-          'ip': '10.0.36.4/24',
-          'mac': '00:00:00:00:36:04',
-          'switch': 'sw6'},
-         {'name': 'h36_5',
-          'ip': '10.0.36.5/24',
-          'mac': '00:00:00:00:36:05',
-          'switch': 'sw6'}
-         ]
+hosts = [
+    {
+        "name": "h35_2",
+        "mac": "00:00:00:00:35:02",
+        "ip": "10.0.35.2/24",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_3",
+        "ip": "10.0.35.3/24",
+        "mac": "00:00:00:00:35:03",
+        "switch": "sw1",
+    },
+    {
+        "name": "h35_4",
+        "ip": "10.0.35.4/24",
+        "mac": "00:00:00:00:35:04",
+        "switch": "sw6",
+    },
+    {
+        "name": "h35_5",
+        "ip": "10.0.35.5/24",
+        "mac": "00:00:00:00:35:05",
+        "switch": "sw6",
+    },
+    {
+        "name": "h36_2",
+        "ip": "10.0.36.2/24",
+        "mac": "00:00:00:00:36:02",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_3",
+        "ip": "10.0.36.3/24",
+        "mac": "00:00:00:00:36:03",
+        "switch": "sw1",
+    },
+    {
+        "name": "h36_4",
+        "ip": "10.0.36.4/24",
+        "mac": "00:00:00:00:36:04",
+        "switch": "sw6",
+    },
+    {
+        "name": "h36_5",
+        "ip": "10.0.36.5/24",
+        "mac": "00:00:00:00:36:05",
+        "switch": "sw6",
+    },
+]
index bd7c6aeb01f401ef75970d6fa6aa6dd4da2f9276..4899d293c4f7492bc20b4f0a2e382fcd4d4ee1bd 100644 (file)
@@ -45,7 +45,7 @@ def add_controller(sw, ip):
         print("Error: %s is not a valid IPv4 address of controller!" % (ip))
         sys.exit(2)
 
-    call(['sudo', 'ovs-vsctl', 'set-controller', sw, 'tcp:%s:6653' % ip])
+    call(["sudo", "ovs-vsctl", "set-controller", sw, "tcp:%s:6653" % ip])
 
 
 def add_manager(ip):
@@ -64,7 +64,7 @@ def add_manager(ip):
         print("Error: %s is not a valid IPv4 address of manager!" % (ip))
         sys.exit(2)
 
-    cmd = ['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % ip]
+    cmd = ["sudo", "ovs-vsctl", "set-manager", "tcp:%s:6640" % ip]
     call(cmd)
 
 
@@ -78,20 +78,28 @@ def add_switch(name, dpid=None):
         :param dpid: DataPath ID of new switch
     """
 
-    call(['sudo', 'ovs-vsctl', 'add-br', name])  # Add bridge
+    call(["sudo", "ovs-vsctl", "add-br", name])  # Add bridge
     if dpid:
         if len(dpid) < 16:  # DPID must be 16 hex digits in later versions of OVS
-            filler = '0000000000000000'
+            filler = "0000000000000000"
             # prepend zeros to reach the 16-character length, e.g. 123 -> 0000000000000123
-            dpid = filler[:len(filler) - len(dpid)] + dpid
+            dpid = filler[: len(filler) - len(dpid)] + dpid
         elif len(dpid) > 16:
-            print('DPID: %s is too long' % dpid)
+            print("DPID: %s is too long" % dpid)
             sys.exit(3)
-        call(['sudo', 'ovs-vsctl', 'set', 'bridge', name,
-              'other-config:datapath-id=%s' % dpid])
-
-
-def set_of_version(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
+        call(
+            [
+                "sudo",
+                "ovs-vsctl",
+                "set",
+                "bridge",
+                name,
+                "other-config:datapath-id=%s" % dpid,
+            ]
+        )
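
The zero-padding above is equivalent to str.zfill, which states the intent
more directly:

    dpid = dpid.zfill(16)  # e.g. "123" -> "0000000000000123"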
+
+
+def set_of_version(sw, version="OpenFlow13,OpenFlow12,OpenFlow10"):
     """Sets OpenFlow protocol versions on OVS switch
 
     Args:
@@ -100,7 +108,7 @@ def set_of_version(sw, version='OpenFlow13,OpenFlow12,OpenFlow10'):
         :param sw: OpenFlow versions to support on switch
     """
 
-    call(['sudo', 'ovs-vsctl', 'set', 'bridge', sw, 'protocols={}'.format(version)])
+    call(["sudo", "ovs-vsctl", "set", "bridge", sw, "protocols={}".format(version)])
 
 
 def add_vxlan_tunnel(sw):
@@ -112,12 +120,21 @@ def add_vxlan_tunnel(sw):
     NOTE:
         :Remote IP is read from flows.
     """
-    ifaceName = '{}-vxlan-0'.format(sw)
-    cmd = ['sudo', 'ovs-vsctl', 'add-port', sw, ifaceName,
-           '--', 'set', 'Interface', ifaceName,
-           'type=vxlan',
-           'options:remote_ip=flow',
-           'options:key=flow']
+    ifaceName = "{}-vxlan-0".format(sw)
+    cmd = [
+        "sudo",
+        "ovs-vsctl",
+        "add-port",
+        sw,
+        ifaceName,
+        "--",
+        "set",
+        "Interface",
+        ifaceName,
+        "type=vxlan",
+        "options:remote_ip=flow",
+        "options:key=flow",
+    ]
     call(cmd)
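
For reference, with sw == "sw1" the list above assembles to a single ovs-vsctl
invocation:

    " ".join(cmd)
    # 'sudo ovs-vsctl add-port sw1 sw1-vxlan-0 -- set Interface sw1-vxlan-0
    #  type=vxlan options:remote_ip=flow options:key=flow'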
 
 
@@ -133,19 +150,28 @@ def add_gpe_tunnel(sw):
         :Remote IP is read from flows.
     """
 
-    ifaceName = '{}-vxlangpe-0'.format(sw)
-    cmd = ['sudo', 'ovs-vsctl', 'add-port', sw, ifaceName,
-           '--', 'set', 'Interface', ifaceName,
-           'type=vxlan',
-           'options:remote_ip=flow',
-           'options:dst_port=6633',
-           'options:nshc1=flow',
-           'options:nshc2=flow',
-           'options:nshc3=flow',
-           'options:nshc4=flow',
-           'options:nsp=flow',
-           'options:nsi=flow',
-           'options:key=flow']
+    ifaceName = "{}-vxlangpe-0".format(sw)
+    cmd = [
+        "sudo",
+        "ovs-vsctl",
+        "add-port",
+        sw,
+        ifaceName,
+        "--",
+        "set",
+        "Interface",
+        ifaceName,
+        "type=vxlan",
+        "options:remote_ip=flow",
+        "options:dst_port=6633",
+        "options:nshc1=flow",
+        "options:nshc2=flow",
+        "options:nshc3=flow",
+        "options:nshc4=flow",
+        "options:nsp=flow",
+        "options:nsi=flow",
+        "options:key=flow",
+    ]
     call(cmd)
 
 
@@ -166,18 +192,22 @@ def launch_container(host, containerImage):
 
     """
 
-    containerID = check_output(['docker',
-                                'run',
-                                '-d',
-                                '--net=none',
-                                '--name=%s' % host['name'],
-                                '-h',
-                                host['name'],
-                                '-t',
-                                '-i',
-                                '--privileged=True',
-                                containerImage,
-                                '/bin/bash'])
+    containerID = check_output(
+        [
+            "docker",
+            "run",
+            "-d",
+            "--net=none",
+            "--name=%s" % host["name"],
+            "-h",
+            host["name"],
+            "-t",
+            "-i",
+            "--privileged=True",
+            containerImage,
+            "/bin/bash",
+        ]
+    )
     return containerID[:-1]  # Remove extraneous \n from output of above
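
Trimming with [:-1] assumes exactly one trailing newline in the docker output;
strip() would be the defensive equivalent (adding .decode() first on Python 3
if a str is wanted):

    return containerID.strip()  # safe even if no trailing newline is present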
 
 
@@ -199,26 +229,19 @@ def connect_container_to_switch(sw, host, containerID):
         :param containerID: ID of docker container
     """
 
-    hostIP = host['ip']
-    mac = host['mac']
+    hostIP = host["ip"]
+    mac = host["mac"]
     nw = ipaddr.IPv4Network(hostIP)
     broadcast = "{}".format(nw.broadcast)
     router = "{}".format(nw.network + 1)
-    ovswork_path = os.path.dirname(os.path.realpath(__file__)) + '/ovswork.sh'
-    cmd = [ovswork_path,
-           sw,
-           containerID,
-           hostIP,
-           broadcast,
-           router,
-           mac,
-           host['name']]
-    if ('vlan') in host:
-        cmd.append(host['vlan'])
+    ovswork_path = os.path.dirname(os.path.realpath(__file__)) + "/ovswork.sh"
+    cmd = [ovswork_path, sw, containerID, hostIP, broadcast, router, mac, host["name"]]
+    if ("vlan") in host:
+        cmd.append(host["vlan"])
     call(cmd)
 
 
-def launch(switches, hosts, odl_ip='127.0.0.1'):
+def launch(switches, hosts, odl_ip="127.0.0.1"):
     """Connects hosts to switches. Arguments are
       tied to the underlying configuration file. Processing runs
        for each switch that is present in the local environment and
@@ -249,24 +272,26 @@ def launch(switches, hosts, odl_ip='127.0.0.1'):
         ports = 0
         first_host = True
         for host in hosts:
-            if host['switch'] == sw['name']:
+            if host["switch"] == sw["name"]:
                 if first_host:
-                    add_switch(sw['name'], sw['dpid'])
-                    set_of_version(sw['name'])
-                    add_controller(sw['name'], odl_ip)
-                    add_gpe_tunnel(sw['name'])
-                    add_vxlan_tunnel(sw['name'])
+                    add_switch(sw["name"], sw["dpid"])
+                    set_of_version(sw["name"])
+                    add_controller(sw["name"], odl_ip)
+                    add_gpe_tunnel(sw["name"])
+                    add_vxlan_tunnel(sw["name"])
                 first_host = False
                 containerImage = defaultContainerImage  # from Config
-                if ('container_image') in host:  # from Config
-                    containerImage = host['container_image']
+                if ("container_image") in host:  # from Config
+                    containerImage = host["container_image"]
                 containerID = launch_container(host, containerImage)
                 ports += 1
-                connect_container_to_switch(
-                    sw['name'], host, containerID)
-                host['port-name'] = 'vethl-' + host['name']
-                print("Created container: %s with IP: %s. Connect using docker attach %s,"
-                      "disconnect with 'ctrl-p-q'." % (host['name'], host['ip'], host['name']))
+                connect_container_to_switch(sw["name"], host, containerID)
+                host["port-name"] = "vethl-" + host["name"]
+                print(
+                    "Created container: %s with IP: %s. Connect using docker attach %s,"
+                    "disconnect with 'ctrl-p-q'."
+                    % (host["name"], host["ip"], host["name"])
+                )
 
 
 if __name__ == "__main__":
@@ -290,9 +315,9 @@ if __name__ == "__main__":
         print("Error: %s is not a valid switch index!" % (sw_index))
         sys.exit(2)
 
-    sw_type = switches[sw_index]['type']
-    sw_name = switches[sw_index]['name']
-    if sw_type == 'gbp':
+    sw_type = switches[sw_index]["type"]
+    sw_name = switches[sw_index]["name"]
+    if sw_type == "gbp":
         print("*****************************")
         print("Configuring %s as a GBP node." % (sw_name))
         print("*****************************")
@@ -302,20 +327,25 @@ if __name__ == "__main__":
         print("OVS status:")
         print("-----------")
         print()
-        call(['sudo', 'ovs-vsctl', 'show'])
+        call(["sudo", "ovs-vsctl", "show"])
         print()
         print("Docker containers:")
         print("------------------")
-        call(['docker', 'ps'])
+        call(["docker", "ps"])
         print("*****************************")
-    elif sw_type == 'sff':
+    elif sw_type == "sff":
         print("*****************************")
         print("Configuring %s as an SFF." % (sw_name))
         print("*****************************")
-        call(['sudo', 'ovs-vsctl', 'set-manager', 'tcp:%s:6640' % controller])
+        call(["sudo", "ovs-vsctl", "set-manager", "tcp:%s:6640" % controller])
         print()
-    elif sw_type == 'sf':
+    elif sw_type == "sf":
         print("*****************************")
         print("Configuring %s as an SF." % (sw_name))
         print("*****************************")
-        call(['%s/sf-config.sh' % os.path.dirname(os.path.realpath(__file__)), '%s' % sw_name])
+        call(
+            [
+                "%s/sf-config.sh" % os.path.dirname(os.path.realpath(__file__)),
+                "%s" % sw_name,
+            ]
+        )
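
A note on the address arithmetic in connect_container_to_switch above: it relies on the legacy ipaddr module, not the stdlib ipaddress. A minimal sketch of what it computes, assuming an illustrative host entry of "10.0.36.2/24" (the real values come from the configuration file):

    import ipaddr  # legacy module used by this script; ipaddress is its stdlib successor

    nw = ipaddr.IPv4Network("10.0.36.2/24")  # host["ip"] carries the prefix length
    broadcast = "{}".format(nw.broadcast)  # "10.0.36.255"
    router = "{}".format(nw.network + 1)  # "10.0.36.1", first address after the network
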
index 451b638abe79920974b51ee29ff16aea922a31db..3a56420d01299ee28d84f89bdb3353690b0fdf9b 100755 (executable)
@@ -18,14 +18,14 @@ class RingTopo(Topo):
     def __init__(self, switches=3, hosts_per_switch=1, **opts):
         Topo.__init__(self, **opts)
         host_suffix = 1
-        switch = self.addSwitch('s%s' % 1)
+        switch = self.addSwitch("s%s" % 1)
         first_switch = switch
         for i in range(1, switches):
             # add hosts to switch
             add_hosts_to_switch(self, switch, hosts_per_switch, host_suffix)
             host_suffix += hosts_per_switch
 
-            new_switch = self.addSwitch('s%s' % (i + 1))
+            new_switch = self.addSwitch("s%s" % (i + 1))
             self.addLink(new_switch, switch)
             switch = new_switch
 
@@ -39,7 +39,7 @@ class MeshTopo(Topo):
         created_switches = []
         host_suffix = 1
         for i in range(switches):
-            new_switch = self.addSwitch('s%s' % (i + 1))
+            new_switch = self.addSwitch("s%s" % (i + 1))
 
             # add hosts to new switch
             add_hosts_to_switch(self, new_switch, hosts_per_switch, host_suffix)
@@ -51,5 +51,4 @@ class MeshTopo(Topo):
             created_switches.append(new_switch)
 
 
-topos = {'ring': RingTopo,
-         'mesh': MeshTopo}
+topos = {"ring": RingTopo, "mesh": MeshTopo}
index 1234c784585682778e87118701efb9dcd1ac1729..5944b0ed0aefbd62a71fd8baaf4bc86dca721d56 100644 (file)
@@ -38,12 +38,12 @@ from mininet.link import Link
 
 class LacpTopo(Topo):
     net = Mininet(controller=RemoteController)
-    c0 = net.addController('c0', controller=RemoteController, ip='CONTROLLER')
-    s1 = net.addSwitch('s1')
-    h1 = net.addHost('h1', mac='00:00:00:00:00:11')
-    h2 = net.addHost('h2', mac='00:00:00:00:00:22')
-    h3 = net.addHost('h3', mac='00:00:00:00:00:33', ip='10.1.1.3')
-    h4 = net.addHost('h4', mac='00:00:00:00:00:44', ip='10.1.1.4')
+    c0 = net.addController("c0", controller=RemoteController, ip="CONTROLLER")
+    s1 = net.addSwitch("s1")
+    h1 = net.addHost("h1", mac="00:00:00:00:00:11")
+    h2 = net.addHost("h2", mac="00:00:00:00:00:22")
+    h3 = net.addHost("h3", mac="00:00:00:00:00:33", ip="10.1.1.3")
+    h4 = net.addHost("h4", mac="00:00:00:00:00:44", ip="10.1.1.4")
 
     Link(s1, h1)
     Link(s1, h1)
@@ -53,11 +53,11 @@ class LacpTopo(Topo):
     Link(s1, h4)
     net.build()
     s1.start([c0])
-    s1.cmd('sudo ovs-vsctl set bridge s1 protocols=OpenFlow13')
-    print(h1.cmd('./h1-bond0.sh'))
-    print(h2.cmd('./h2-bond0.sh'))
+    s1.cmd("sudo ovs-vsctl set bridge s1 protocols=OpenFlow13")
+    print(h1.cmd("./h1-bond0.sh"))
+    print(h2.cmd("./h2-bond0.sh"))
     CLI(net)
     net.stop()
 
 
-topos = {'lacp': (lambda: LacpTopo())}
+topos = {"lacp": (lambda: LacpTopo())}
index 6ec2bf74eff3aaa2714edc0a9d4eec8e23020103..a941a2b0c5e9407cc0d57a5e77f41909498f94d5 100755 (executable)
@@ -20,10 +20,14 @@ class Switch1(Topo):
 
     def __init__(self):
         Topo.__init__(self)
-        switch = self.addSwitch('s1')
+        switch = self.addSwitch("s1")
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 1), mac="00:00:00:00:00:0" + str(h + 1), ip="10.0.0." + str(h + 1))
+            host = self.addHost(
+                "h%s" % (h + 1),
+                mac="00:00:00:00:00:0" + str(h + 1),
+                ip="10.0.0." + str(h + 1),
+            )
             self.addLink(host, switch)
 
 
@@ -32,12 +36,15 @@ class Switch2(Topo):
 
     def __init__(self):
         Topo.__init__(self)
-        switch = self.addSwitch('s2')
+        switch = self.addSwitch("s2")
         n = 2
         for h in range(n):
-            host = self.addHost('h%s' % (h + 3), mac="00:00:00:00:00:0" + str(h + 3), ip="10.0.0." + str(h + 3))
+            host = self.addHost(
+                "h%s" % (h + 3),
+                mac="00:00:00:00:00:0" + str(h + 3),
+                ip="10.0.0." + str(h + 3),
+            )
             self.addLink(host, switch)
 
 
-topos = {'Switch1': (lambda: Switch1()),
-         'Switch2': (lambda: Switch2())}
+topos = {"Switch1": (lambda: Switch1()), "Switch2": (lambda: Switch2())}
index a29f9a3b434ae134068abb75c79b96dd83a9bb52..c5ba23c22ea8250f635bd39dfda58b9ea611e0df 100644 (file)
@@ -7,144 +7,148 @@ Edited: Many times by many people
 """
 
 # VM Environment defaults
-DEFAULT_LINUX_PROMPT = '>'
-DEFAULT_LINUX_PROMPT_STRICT = ']>'
-DEFAULT_USER = 'jenkins'
-DEFAULT_TIMEOUT = '30s'
+DEFAULT_LINUX_PROMPT = ">"
+DEFAULT_LINUX_PROMPT_STRICT = "]>"
+DEFAULT_USER = "jenkins"
+DEFAULT_TIMEOUT = "30s"
 
 # ODL system variables
-ODL_SYSTEM_IP = '127.0.0.1'  # Override if ODL is not running locally to pybot
-ODL_SYSTEM_IP_LIST = ['ODL_SYSTEM_1_IP', 'ODL_SYSTEM_2_IP', 'ODL_SYSTEM_3_IP']
+ODL_SYSTEM_IP = "127.0.0.1"  # Override if ODL is not running locally to pybot
+ODL_SYSTEM_IP_LIST = ["ODL_SYSTEM_1_IP", "ODL_SYSTEM_2_IP", "ODL_SYSTEM_3_IP"]
 ODL_SYSTEM_USER = DEFAULT_USER
-ODL_SYSTEM_PASSWORD = ''  # empty means use public key authentication
+ODL_SYSTEM_PASSWORD = ""  # empty means use public key authentication
 ODL_SYSTEM_PROMPT = DEFAULT_LINUX_PROMPT
 
 # "Tools" system variables (mininet etc).
-TOOLS_SYSTEM_IP = '127.0.0.1'  # Override if tools are not run locally to pybot
+TOOLS_SYSTEM_IP = "127.0.0.1"  # Override if tools are not run locally to pybot
 TOOLS_SYSTEM_USER = DEFAULT_USER
-TOOLS_SYSTEM_PASSWORD = ''  # empty means use public key authentication
+TOOLS_SYSTEM_PASSWORD = ""  # empty means use public key authentication
 TOOLS_SYSTEM_PROMPT = DEFAULT_LINUX_PROMPT
 
 # KARAF Variables
-KARAF_SHELL_PORT = '8101'
-ESCAPE_CHARACTER = '\x1B'
-KARAF_PROMPT_LOGIN = 'opendaylight-user'
-KARAF_USER = 'karaf'
-KARAF_PASSWORD = 'karaf'
-KARAF_PROMPT = 'opendaylight-user.*root.*>'
+KARAF_SHELL_PORT = "8101"
+ESCAPE_CHARACTER = "\x1B"
+KARAF_PROMPT_LOGIN = "opendaylight-user"
+KARAF_USER = "karaf"
+KARAF_PASSWORD = "karaf"
+KARAF_PROMPT = "opendaylight-user.*root.*>"
 
 # Logging levels
-DEFAULT_ODL_LOG_LEVEL = 'INFO'
+DEFAULT_ODL_LOG_LEVEL = "INFO"
 DEFAULT_BGPCEP_LOG_LEVEL = DEFAULT_ODL_LOG_LEVEL
 DEFAULT_PROTOCOL_LOG_LEVEL = DEFAULT_BGPCEP_LOG_LEVEL
 BGPCEP_LOG_LEVEL = DEFAULT_BGPCEP_LOG_LEVEL
 PROTOCOL_LOG_LEVEL = BGPCEP_LOG_LEVEL
 
 # BGP variables
-ODL_BGP_PORT = '1790'
-BGP_TOOL_PORT = '17900'
+ODL_BGP_PORT = "1790"
+BGP_TOOL_PORT = "17900"
 
 # Restconf variables
-ODL_RESTCONF_USER = 'admin'
-ODL_RESTCONF_PASSWORD = 'admin'
+ODL_RESTCONF_USER = "admin"
+ODL_RESTCONF_PASSWORD = "admin"
 
 # Netconf variables
-ODL_NETCONF_CONFIG_PORT = '1830'
-ODL_NETCONF_MDSAL_PORT = '2830'
-ODL_NETCONF_USER = 'admin'
-ODL_NETCONF_PASSWORD = 'admin'
-ODL_NETCONF_PROMPT = ']]>]]>'
-ODL_NETCONF_NAMESPACE = 'urn:ietf:params:xml:ns:netconf:base:1.0'
+ODL_NETCONF_CONFIG_PORT = "1830"
+ODL_NETCONF_MDSAL_PORT = "2830"
+ODL_NETCONF_USER = "admin"
+ODL_NETCONF_PASSWORD = "admin"
+ODL_NETCONF_PROMPT = "]]>]]>"
+ODL_NETCONF_NAMESPACE = "urn:ietf:params:xml:ns:netconf:base:1.0"
 
 # OpenFlow variables
-ODL_OF_PORT = '6633'
-ODL_OF_PLUGIN = 'lithium'
+ODL_OF_PORT = "6633"
+ODL_OF_PLUGIN = "lithium"
 
 # VTN Coordinator Variables
-VTNC = '127.0.0.1'
-VTNCPORT = '8083'
-VTNC_PREFIX = 'http://' + VTNC + ':' + VTNCPORT
-VTNC_HEADERS = {'Content-Type': 'application/json',
-                'username': 'admin', 'password': 'adminpass'}
-
-VTNWEBAPI = '/vtn-webapi'
+VTNC = "127.0.0.1"
+VTNCPORT = "8083"
+VTNC_PREFIX = "http://" + VTNC + ":" + VTNCPORT
+VTNC_HEADERS = {
+    "Content-Type": "application/json",
+    "username": "admin",
+    "password": "adminpass",
+}
+
+VTNWEBAPI = "/vtn-webapi"
 # controllers URL
-CTRLS_CREATE = 'controllers.json'
-CTRLS = 'controllers'
-SW = 'switches'
+CTRLS_CREATE = "controllers.json"
+CTRLS = "controllers"
+SW = "switches"
 
 # vtn URL
-VTNS_CREATE = 'vtns.json'
-VTNS = 'vtns'
+VTNS_CREATE = "vtns.json"
+VTNS = "vtns"
 
 # vbridge URL
-VBRS_CREATE = 'vbridges.json'
-VBRS = 'vbridges'
+VBRS_CREATE = "vbridges.json"
+VBRS = "vbridges"
 
 # interfaces URL
-VBRIFS_CREATE = 'interfaces.json'
-VBRIFS = 'interfaces'
+VBRIFS_CREATE = "interfaces.json"
+VBRIFS = "interfaces"
 
 # portmap URL
-PORTMAP_CREATE = 'portmap.json'
+PORTMAP_CREATE = "portmap.json"
 
 # vlanmap URL
-VLANMAP_CREATE = 'vlanmaps.json'
+VLANMAP_CREATE = "vlanmaps.json"
 
 # ports URL
-PORTS = 'ports/detail.json'
+PORTS = "ports/detail.json"
 
 # flowlist URL
-FLOWLISTS_CREATE = 'flowlists.json'
+FLOWLISTS_CREATE = "flowlists.json"
 
 # flowlistentry_URL
-FLOWLISTENTRIES_CREATE = 'flowlistentries.json'
-FLOWLISTS = 'flowlists'
+FLOWLISTENTRIES_CREATE = "flowlistentries.json"
+FLOWLISTS = "flowlists"
 
 # flowfilter_URL
-FLOWFILTERS_CREATE = 'flowfilters.json'
-FLOWFILTERENTRIES_CREATE = 'flowfilterentries.json'
-FLOWFILTERS = 'flowfilters/in'
-FLOWFILTERS_UPDATE = 'flowfilterentries'
+FLOWFILTERS_CREATE = "flowfilters.json"
+FLOWFILTERENTRIES_CREATE = "flowfilterentries.json"
+FLOWFILTERS = "flowfilters/in"
+FLOWFILTERS_UPDATE = "flowfilterentries"
 
 
 # Common APIs
-CONFIG_NODES_API = '/restconf/config/opendaylight-inventory:nodes'
-OPERATIONAL_NODES_API = '/restconf/operational/opendaylight-inventory:nodes'
-OPERATIONAL_NODES_NETVIRT = '/restconf/operational/network-topology:network-topology/topology/netvirt:1'
-OPERATIONAL_TOPO_API = '/restconf/operational/network-topology:' \
-                       'network-topology'
-CONFIG_TOPO_API = '/restconf/config/network-topology:network-topology'
-CONTROLLER_CONFIG_MOUNT = ('/restconf/config/network-topology:'
-                           'network-topology/topology'
-                           '/topology-netconf/node/'
-                           'controller-config/yang-ext:mount')
-CONFIG_API = '/restconf/config'
-OPERATIONAL_API = '/restconf/operational'
-MODULES_API = '/restconf/modules'
-VTN_INVENTORY_NODE_API = '/restconf/operational/vtn-inventory:vtn-nodes'
+CONFIG_NODES_API = "/restconf/config/opendaylight-inventory:nodes"
+OPERATIONAL_NODES_API = "/restconf/operational/opendaylight-inventory:nodes"
+OPERATIONAL_NODES_NETVIRT = (
+    "/restconf/operational/network-topology:network-topology/topology/netvirt:1"
+)
+OPERATIONAL_TOPO_API = "/restconf/operational/network-topology:" "network-topology"
+CONFIG_TOPO_API = "/restconf/config/network-topology:network-topology"
+CONTROLLER_CONFIG_MOUNT = (
+    "/restconf/config/network-topology:"
+    "network-topology/topology"
+    "/topology-netconf/node/"
+    "controller-config/yang-ext:mount"
+)
+CONFIG_API = "/restconf/config"
+OPERATIONAL_API = "/restconf/operational"
+MODULES_API = "/restconf/modules"
+VTN_INVENTORY_NODE_API = "/restconf/operational/vtn-inventory:vtn-nodes"
 
 # NEMO Variables
-PREDEFINE_ROLE_URI = '/restconf/config/nemo-user:user-roles'
-PREDEFINE_NODE_URI = '/restconf/config/nemo-object:node-definitions'
-PREDEFINE_CONNECTION_URI = '/restconf/config/nemo-object:connection-definitions'
-REGISTER_TENANT_URI = '/restconf/operations/nemo-intent:register-user'
-STRUCTURE_INTENT_URI = '/restconf/operations/nemo-intent:structure-style-nemo-update'
-GET_INTENTS_URI = '/restconf/config/intent:intents'
+PREDEFINE_ROLE_URI = "/restconf/config/nemo-user:user-roles"
+PREDEFINE_NODE_URI = "/restconf/config/nemo-object:node-definitions"
+PREDEFINE_CONNECTION_URI = "/restconf/config/nemo-object:connection-definitions"
+REGISTER_TENANT_URI = "/restconf/operations/nemo-intent:register-user"
+STRUCTURE_INTENT_URI = "/restconf/operations/nemo-intent:structure-style-nemo-update"
+GET_INTENTS_URI = "/restconf/config/intent:intents"
 
 # TOKEN
-AUTH_TOKEN_API = '/oauth2/token'
-REVOKE_TOKEN_API = '/oauth2/revoke'
+AUTH_TOKEN_API = "/oauth2/token"
+REVOKE_TOKEN_API = "/oauth2/revoke"
 
 # Vlan Custom Topology Path and File
 CREATE_VLAN_TOPOLOGY_FILE = "vlan_vtn_test.py"
-CREATE_VLAN_TOPOLOGY_FILE_PATH = "MininetTopo/" +\
-                                 CREATE_VLAN_TOPOLOGY_FILE
+CREATE_VLAN_TOPOLOGY_FILE_PATH = "MininetTopo/" + CREATE_VLAN_TOPOLOGY_FILE
 
 # Mininet Custom Topology Path and File for Path Policy
 CREATE_PATHPOLICY_TOPOLOGY_FILE = "topo-3sw-2host_multipath.py"
-CREATE_PATHPOLICY_TOPOLOGY_FILE_PATH = "MininetTopo/" +\
-                                       CREATE_PATHPOLICY_TOPOLOGY_FILE
+CREATE_PATHPOLICY_TOPOLOGY_FILE_PATH = "MininetTopo/" + CREATE_PATHPOLICY_TOPOLOGY_FILE
 
 GBP_REGEP_API = "/restconf/operations/endpoint:register-endpoint"
 GBP_UNREGEP_API = "/restconf/operations/endpoint:unregister-endpoint"
@@ -160,44 +164,55 @@ LFM_RPC_API_LI = "/restconf/operations/lfm-mapping-database"
 LFM_SB_RPC_API = "/restconf/operations/odl-lisp-sb"
 
 # Neutron
-NEUTRON_NB_API = '/controller/nb/v2/neutron'
-NEUTRON_NETWORKS_API = NEUTRON_NB_API + '/' + 'networks'
-NEUTRON_SUBNETS_API = NEUTRON_NB_API + '/' + 'subnets'
-NEUTRON_PORTS_API = NEUTRON_NB_API + '/' + 'ports'
-NEUTRON_ROUTERS_API = NEUTRON_NB_API + '/' + 'routers'
-OSREST = '/v2.0/networks'
+NEUTRON_NB_API = "/controller/nb/v2/neutron"
+NEUTRON_NETWORKS_API = NEUTRON_NB_API + "/" + "networks"
+NEUTRON_SUBNETS_API = NEUTRON_NB_API + "/" + "subnets"
+NEUTRON_PORTS_API = NEUTRON_NB_API + "/" + "ports"
+NEUTRON_ROUTERS_API = NEUTRON_NB_API + "/" + "routers"
+OSREST = "/v2.0/networks"
 
 # Openstack System Prompt
-OS_SYSTEM_PROMPT = '$'
+OS_SYSTEM_PROMPT = "$"
 
 # Other global variables
 # TODO: Move these to more appropriate sections.
-PORT = '8080'
-RESTPORT = '8282'
-RESTCONFPORT = '8181'
-OVSDBPORT = '6640'
-CONTAINER = 'default'
-PREFIX = 'http://' + ODL_SYSTEM_IP + ':' + PORT  # TODO: determine where this is used; create a better named variable
-USER = 'admin'  # TODO: who is using this?  Can we make it more specific? (e.g.  RESTCONF_USER)
-PWD = 'admin'
-PASSWORD = 'EMPTY'
-AUTH = [u'admin', u'admin']
-SCOPE = 'sdn'
-HEADERS = {'Content-Type': 'application/json'}
-HEADERS_YANG_JSON = {'Content-Type': 'application/yang.data+json'}
-HEADERS_XML = {'Content-Type': 'application/xml'}
-ACCEPT_XML = {'Accept': 'application/xml'}
-ACCEPT_JSON = {'Accept': 'application/json'}
-ACCEPT_EMPTY = {}  # Json should be default, but no-output RPC cannot have Accept header.
+PORT = "8080"
+RESTPORT = "8282"
+RESTCONFPORT = "8181"
+OVSDBPORT = "6640"
+CONTAINER = "default"
+PREFIX = (  # TODO: determine where this is used; create a better named variable
+    "http://" + ODL_SYSTEM_IP + ":" + PORT
+)
+USER = (  # TODO: who is using this?  Can we make it more specific? (e.g.  RESTCONF_USER)
+    "admin"
+)
+PWD = "admin"
+PASSWORD = "EMPTY"
+AUTH = [u"admin", u"admin"]
+SCOPE = "sdn"
+HEADERS = {"Content-Type": "application/json"}
+HEADERS_YANG_JSON = {"Content-Type": "application/yang.data+json"}
+HEADERS_XML = {"Content-Type": "application/xml"}
+ACCEPT_XML = {"Accept": "application/xml"}
+ACCEPT_JSON = {"Accept": "application/json"}
+ACCEPT_EMPTY = (
+    {}
+)  # Json should be default, but no-output RPC cannot have Accept header.
 ODL_CONTROLLER_SESSION = None
 TOPO_TREE_LEVEL = 2
 TOPO_TREE_DEPTH = 3
 TOPO_TREE_FANOUT = 2
-KEYFILE_PASS = 'any'
-SSH_KEY = 'id_rsa'
-CONTROLLER_STOP_TIMEOUT = 120  # Max number of seconds test will wait for a controller to stop
-TOPOLOGY_URL = 'network-topology:network-topology/topology'
-SEND_ACCEPT_XML_HEADERS = {'Content-Type': 'application/xml', 'Accept': 'application/xml'}
+KEYFILE_PASS = "any"
+SSH_KEY = "id_rsa"
+CONTROLLER_STOP_TIMEOUT = (
+    120  # Max number of seconds test will wait for a controller to stop
+)
+TOPOLOGY_URL = "network-topology:network-topology/topology"
+SEND_ACCEPT_XML_HEADERS = {
+    "Content-Type": "application/xml",
+    "Accept": "application/xml",
+}
 
 # Test deadlines global control
 ENABLE_GLOBAL_TEST_DEADLINES = True
@@ -205,26 +220,32 @@ ENABLE_GLOBAL_TEST_DEADLINES = True
 # Deprecated old variables, to be removed once all tests that need them are
 # updated to use the new names.
 CONTROLLER = ODL_SYSTEM_IP
-CONTROLLERS = ['ODL_SYSTEM_1_IP', 'ODL_SYSTEM_2_IP', 'ODL_SYSTEM_3_IP']
+CONTROLLERS = ["ODL_SYSTEM_1_IP", "ODL_SYSTEM_2_IP", "ODL_SYSTEM_3_IP"]
 CONTROLLER_PASSWORD = ODL_SYSTEM_PASSWORD
 CONTROLLER_PROMPT = ODL_SYSTEM_PROMPT
 
 # Centinel Variables
-SET_CONFIGURATION_URI = '/restconf/operations/configuration:set-centinel-configurations'
-GET_CONFIGURATION_URI = '/restconf/operational/configuration:configurationRecord/'
-STREAMRECORD_CONFIG = '/restconf/config/stream:streamRecord'
-SET_STREAMRECORD = '/restconf/operations/stream:set-stream'
-ALERTFIELDCONTENTRULERECORD = '/restconf/config/alertrule:alertFieldContentRuleRecord/'
-SET_ALERTFIELDCONTENTRULERECORD = '/restconf/operations/alertrule:set-alert-field-content-rule'
-ALERTFIELDVALUERULERECORD = '/restconf/config/alertrule:alertFieldValueRuleRecord'
-SET_ALERTFIELDVALUERULERECORD = '/restconf/operations/alertrule:set-alert-field-value-rule'
-ALERTMESSAGECOUNTRULERECORD = '/restconf/config/alertrule:alertMessageCountRuleRecord/'
-SET_ALERTMESSAGECOUNTRULERECORD = '/restconf/operations/alertrule:set-alert-message-count-rule'
-GET_DASHBOARDRECORD = '/restconf/operational/dashboardrule:dashboardRecord/'
-SET_DASHBOARDRECORD = '/restconf/operations/dashboardrule:set-dashboard'
-DELETE_DASHBOARDRECORD = '/restconf/operations/dashboardrule:delete-dashboard'
-SET_SUBSCRIBEUSER = '/restconf/operations/subscribe:subscribe-user'
-SUBSCRIPTION = '/restconf/config/subscribe:subscription/'
+SET_CONFIGURATION_URI = "/restconf/operations/configuration:set-centinel-configurations"
+GET_CONFIGURATION_URI = "/restconf/operational/configuration:configurationRecord/"
+STREAMRECORD_CONFIG = "/restconf/config/stream:streamRecord"
+SET_STREAMRECORD = "/restconf/operations/stream:set-stream"
+ALERTFIELDCONTENTRULERECORD = "/restconf/config/alertrule:alertFieldContentRuleRecord/"
+SET_ALERTFIELDCONTENTRULERECORD = (
+    "/restconf/operations/alertrule:set-alert-field-content-rule"
+)
+ALERTFIELDVALUERULERECORD = "/restconf/config/alertrule:alertFieldValueRuleRecord"
+SET_ALERTFIELDVALUERULERECORD = (
+    "/restconf/operations/alertrule:set-alert-field-value-rule"
+)
+ALERTMESSAGECOUNTRULERECORD = "/restconf/config/alertrule:alertMessageCountRuleRecord/"
+SET_ALERTMESSAGECOUNTRULERECORD = (
+    "/restconf/operations/alertrule:set-alert-message-count-rule"
+)
+GET_DASHBOARDRECORD = "/restconf/operational/dashboardrule:dashboardRecord/"
+SET_DASHBOARDRECORD = "/restconf/operations/dashboardrule:set-dashboard"
+DELETE_DASHBOARDRECORD = "/restconf/operations/dashboardrule:delete-dashboard"
+SET_SUBSCRIBEUSER = "/restconf/operations/subscribe:subscribe-user"
+SUBSCRIPTION = "/restconf/config/subscribe:subscription/"
 
 # Elasticsearch Variables
 ELASTICPORT = 9200
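
One side effect of the rewrap above is worth calling out: where a parenthesized value such as OPERATIONAL_TOPO_API now holds two adjacent string literals, Python joins them at compile time, so the resulting constants are unchanged. A quick self-contained check:

    # Adjacent string literals concatenate at compile time; both spellings are equal.
    a = "/restconf/operational/network-topology:" "network-topology"
    b = "/restconf/operational/network-topology:network-topology"
    assert a == b
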
index 0016feb54348a8a00883474688ce79e2d30e671c..ba8be19547616a237fb02365124d453b29f61755 100644 (file)
@@ -1,7 +1,9 @@
-RESOURCE_POOL_BASE = 'restconf/operational/alto-resourcepool:context'
-DEFAULT_CONTEXT_ID = '00000000-0000-0000-0000-000000000000'
+RESOURCE_POOL_BASE = "restconf/operational/alto-resourcepool:context"
+DEFAULT_CONTEXT_ID = "00000000-0000-0000-0000-000000000000"
 
 # ALTO IRD variables
-ALTO_SIMPLE_IRD_INFO = 'restconf/operational/alto-simple-ird:information'
-ALTO_CONFIG_IRD_INSTANCE_CONFIG = 'restconf/config/alto-simple-ird:ird-instance-configuration'
-ALTO_OPERATIONAL_IRD_INSTANCE = 'restconf/operational/alto-simple-ird:ird-instance'
+ALTO_SIMPLE_IRD_INFO = "restconf/operational/alto-simple-ird:information"
+ALTO_CONFIG_IRD_INSTANCE_CONFIG = (
+    "restconf/config/alto-simple-ird:ird-instance-configuration"
+)
+ALTO_OPERATIONAL_IRD_INSTANCE = "restconf/operational/alto-simple-ird:ird-instance"
index 8f04bec177dd7f0e556a23517ad05b67f36e60db..862402b992b0cf3b187e2d20784d8a8640c5bd7c 100644 (file)
@@ -1,36 +1,36 @@
 data_models = [
-    'config/ietf-interfaces:interfaces',
-    'config/interface-service-bindings:service-bindings',
-    'config/itm:transport-zones',
-    'config/itm-config:tunnel-monitor-enabled',
-    'config/itm-config:tunnel-monitor-interval',
-    'config/itm-config:tunnel-monitor-params',
-    'config/itm-state:dpn-endpoints',
-    'config/itm-state:dpn-teps-state',
-    'config/itm-state:external-tunnel-list',
-    'config/itm-state:tunnel-list',
-    'config/network-topology:network-topology/topology/ovsdb:1',
-    'config/odl-interface-meta:bridge-interface-info',
-    'config/odl-interface-meta:interface-child-info',
-    'config/odl-itm-meta:bridge-tunnel-info',
-    'config/opendaylight-inventory:nodes',
-    'operational/ietf-interfaces:interfaces-state',
-    'operational/interface-service-bindings:bound-services-state-list',
-    'operational/itm-config:tunnel-monitor-enabled',
-    'operational/itm-config:tunnel-monitor-interval',
-    'operational/itm-config:tunnel-monitor-params',
-    'operational/itm-state:tunnels_state',
-    'operational/network-topology:network-topology/topology/ovsdb:1',
-    'operational/odl-interface-meta:bridge-ref-info',
-    'operational/odl-interface-meta:dpn-to-interface-list',
-    'operational/odl-interface-meta:if-indexes-interface-map',
-    'operational/odl-itm-meta:dpn-to-interface-list',
-    'operational/odl-itm-meta:if-indexes-tunnel-map',
-    'operational/odl-itm-meta:ovs-bridge-ref-info',
-    'operational/opendaylight-inventory:nodes',
+    "config/ietf-interfaces:interfaces",
+    "config/interface-service-bindings:service-bindings",
+    "config/itm:transport-zones",
+    "config/itm-config:tunnel-monitor-enabled",
+    "config/itm-config:tunnel-monitor-interval",
+    "config/itm-config:tunnel-monitor-params",
+    "config/itm-state:dpn-endpoints",
+    "config/itm-state:dpn-teps-state",
+    "config/itm-state:external-tunnel-list",
+    "config/itm-state:tunnel-list",
+    "config/network-topology:network-topology/topology/ovsdb:1",
+    "config/odl-interface-meta:bridge-interface-info",
+    "config/odl-interface-meta:interface-child-info",
+    "config/odl-itm-meta:bridge-tunnel-info",
+    "config/opendaylight-inventory:nodes",
+    "operational/ietf-interfaces:interfaces-state",
+    "operational/interface-service-bindings:bound-services-state-list",
+    "operational/itm-config:tunnel-monitor-enabled",
+    "operational/itm-config:tunnel-monitor-interval",
+    "operational/itm-config:tunnel-monitor-params",
+    "operational/itm-state:tunnels_state",
+    "operational/network-topology:network-topology/topology/ovsdb:1",
+    "operational/odl-interface-meta:bridge-ref-info",
+    "operational/odl-interface-meta:dpn-to-interface-list",
+    "operational/odl-interface-meta:if-indexes-interface-map",
+    "operational/odl-itm-meta:dpn-to-interface-list",
+    "operational/odl-itm-meta:if-indexes-tunnel-map",
+    "operational/odl-itm-meta:ovs-bridge-ref-info",
+    "operational/opendaylight-inventory:nodes",
 ]
 
 idmanager_data_models = [
-    'config/id-manager:id-pools/',
-    'operational/lock-manager:locks/',
+    "config/id-manager:id-pools/",
+    "operational/lock-manager:locks/",
 ]
index dce14b5668c6a7a0d8feb483de22a1b7c2594bc9..f69f12715ea7eac9ad4f7456a2b3701e47665e4f 100755 (executable)
@@ -101,7 +101,7 @@ class Service(object):
         try:
             # params are supplied as a mapping; the three arguments are accessed by key
             response["result"] = self._methods[method](
-                params['store'], params['entity'], params['path']
+                params["store"], params["entity"], params["path"]
             )  # pylint: disable=star-args
         except KeyError:
             response["error"] = {"code": -32601, "message": "Method not found"}
index 8c8b728ca0f0e427408d3a7fcc5ce1cb4b40a76f..a24ed4ade598abef24ca023042f4a299929a7805 100644 (file)
@@ -1,78 +1,78 @@
 netvirt_data_models = [
-    'config/ebgp:bgp',
-    'config/elan:elan-instances',
-    'config/elan:elan-interfaces',
-    'config/id-manager:id-pools',
-    'config/ietf-access-control-list:access-lists',
-    'config/ietf-interfaces:interfaces',
-    'config/interface-service-bindings:service-bindings',
-    'config/itm-state:dpn-endpoints',
-    'config/itm-state:dpn-teps-state',
-    'config/itm-state:external-tunnel-list',
-    'config/itm-state:tunnel-list',
-    'config/itm:transport-zones',
-    'config/l3vpn:vpn-instances',
-    'config/l3vpn:vpn-interfaces',
-    'config/l3vpn-instances-interfaces:vpn-instances',
-    'config/l3vpn-instances-interfaces:vpn-interfaces',
-    'config/network-topology:network-topology/topology/ovsdb:1',
-    'config/neutron:neutron',
-    'config/neutronvpn:networkMaps',
-    'config/neutronvpn:neutron-vpn-portip-port-data',
-    'config/neutronvpn:router-interfaces-map',
-    'config/neutronvpn:subnetmaps',
-    'config/neutronvpn:vpnMaps',
-    'config/odl-fib:fibEntries',
-    'config/odl-interface-meta:interface-child-info',
-    'config/odl-l3vpn:router-interfaces',
-    'config/odl-l3vpn:vpn-id-to-vpn-instance',
-    'config/odl-l3vpn:vpn-instance-to-vpn-id',
-    'config/odl-nat:ext-routers',
-    'config/odl-nat:external-networks',
-    'config/odl-nat:external-subnets',
-    'config/odl-nat:floating-ip-info',
-    'config/odl-nat:intext-ip-port-map',
-    'config/odl-nat:napt-switches',
-    'config/odl-nat:router-id-name',
-    'config/odl-nat:snatint-ip-port-map',
-    'config/opendaylight-inventory:nodes',
-    'operational/elan:elan-dpn-interfaces',
-    'operational/elan:elan-forwarding-tables',
-    'operational/elan:elan-interfaces',
-    'operational/elan:elan-state',
-    'operational/ietf-interfaces:interfaces-state',
-    'operational/interface-service-bindings:bound-services-state-list',
-    'operational/itm-state:tunnels_state',
-    'operational/l3nexthop:l3nexthop',
-    'operational/l3vpn:vpn-interfaces',
-    'operational/network-topology:network-topology/topology/ovsdb:1',
-    'operational/neutron:neutron/neutron:ports',
-    'operational/odl-fib:label-route-map',
-    'operational/odl-interface-meta:if-indexes-interface-map',
-    'operational/odl-l3vpn:learnt-vpn-vip-to-port-data',
-    'operational/odl-l3vpn:neutron-router-dpns',
-    'operational/odl-l3vpn:port-op-data',
-    'operational/odl-l3vpn:prefix-to-interface',
-    'operational/odl-l3vpn:subnet-op-data',
-    'operational/odl-l3vpn:vpn-instance-op-data',
-    'operational/odl-l3vpn:vpn-interface-op-data',
-    'operational/odl-l3vpn:vpn-to-extraroute',
-    'operational/odl-nat:external-ips-counter',
-    'operational/odl-nat:floating-ip-info',
-    'operational/odl-nat:intext-ip-map',
-    'operational/opendaylight-inventory:nodes'
+    "config/ebgp:bgp",
+    "config/elan:elan-instances",
+    "config/elan:elan-interfaces",
+    "config/id-manager:id-pools",
+    "config/ietf-access-control-list:access-lists",
+    "config/ietf-interfaces:interfaces",
+    "config/interface-service-bindings:service-bindings",
+    "config/itm-state:dpn-endpoints",
+    "config/itm-state:dpn-teps-state",
+    "config/itm-state:external-tunnel-list",
+    "config/itm-state:tunnel-list",
+    "config/itm:transport-zones",
+    "config/l3vpn:vpn-instances",
+    "config/l3vpn:vpn-interfaces",
+    "config/l3vpn-instances-interfaces:vpn-instances",
+    "config/l3vpn-instances-interfaces:vpn-interfaces",
+    "config/network-topology:network-topology/topology/ovsdb:1",
+    "config/neutron:neutron",
+    "config/neutronvpn:networkMaps",
+    "config/neutronvpn:neutron-vpn-portip-port-data",
+    "config/neutronvpn:router-interfaces-map",
+    "config/neutronvpn:subnetmaps",
+    "config/neutronvpn:vpnMaps",
+    "config/odl-fib:fibEntries",
+    "config/odl-interface-meta:interface-child-info",
+    "config/odl-l3vpn:router-interfaces",
+    "config/odl-l3vpn:vpn-id-to-vpn-instance",
+    "config/odl-l3vpn:vpn-instance-to-vpn-id",
+    "config/odl-nat:ext-routers",
+    "config/odl-nat:external-networks",
+    "config/odl-nat:external-subnets",
+    "config/odl-nat:floating-ip-info",
+    "config/odl-nat:intext-ip-port-map",
+    "config/odl-nat:napt-switches",
+    "config/odl-nat:router-id-name",
+    "config/odl-nat:snatint-ip-port-map",
+    "config/opendaylight-inventory:nodes",
+    "operational/elan:elan-dpn-interfaces",
+    "operational/elan:elan-forwarding-tables",
+    "operational/elan:elan-interfaces",
+    "operational/elan:elan-state",
+    "operational/ietf-interfaces:interfaces-state",
+    "operational/interface-service-bindings:bound-services-state-list",
+    "operational/itm-state:tunnels_state",
+    "operational/l3nexthop:l3nexthop",
+    "operational/l3vpn:vpn-interfaces",
+    "operational/network-topology:network-topology/topology/ovsdb:1",
+    "operational/neutron:neutron/neutron:ports",
+    "operational/odl-fib:label-route-map",
+    "operational/odl-interface-meta:if-indexes-interface-map",
+    "operational/odl-l3vpn:learnt-vpn-vip-to-port-data",
+    "operational/odl-l3vpn:neutron-router-dpns",
+    "operational/odl-l3vpn:port-op-data",
+    "operational/odl-l3vpn:prefix-to-interface",
+    "operational/odl-l3vpn:subnet-op-data",
+    "operational/odl-l3vpn:vpn-instance-op-data",
+    "operational/odl-l3vpn:vpn-interface-op-data",
+    "operational/odl-l3vpn:vpn-to-extraroute",
+    "operational/odl-nat:external-ips-counter",
+    "operational/odl-nat:floating-ip-info",
+    "operational/odl-nat:intext-ip-map",
+    "operational/opendaylight-inventory:nodes",
 ]
 
 netvirt_sfc_data_models = [
-    'config/service-function-chain:service-function-chains',
-    'config/service-function-classifier:service-function-classifiers',
-    'config/service-function-forwarder:service-function-forwarders',
-    'config/service-function-group:service-function-groups',
-    'config/service-function-mapping:sff-dpl-by-sf-dpl-mappings',
-    'config/service-function-path:service-function-paths',
-    'config/service-function-path-metadata:service-function-metadata',
-    'config/service-function-type:service-function-types',
-    'config/service-function:service-functions',
-    'config/sfc-of-renderer:sfc-of-renderer-config',
-    'operational/rendered-service-path:rendered-service-paths/'
+    "config/service-function-chain:service-function-chains",
+    "config/service-function-classifier:service-function-classifiers",
+    "config/service-function-forwarder:service-function-forwarders",
+    "config/service-function-group:service-function-groups",
+    "config/service-function-mapping:sff-dpl-by-sf-dpl-mappings",
+    "config/service-function-path:service-function-paths",
+    "config/service-function-path-metadata:service-function-metadata",
+    "config/service-function-type:service-function-types",
+    "config/service-function:service-functions",
+    "config/sfc-of-renderer:sfc-of-renderer-config",
+    "operational/rendered-service-path:rendered-service-paths/",
 ]
index e1e5abed790179074c02810b1c5dcab241cf5a54..52d8c3e1a00aeb8d5341cc50c39b10b1d9d1dd53 100644 (file)
@@ -1,2 +1,2 @@
-NODE_ID = '/restconf/operational/opendaylight-inventory:nodes/node/ocp:'
-REST_GET_PARAM = '/restconf/operations/ocp-service:get-param-nb'
+NODE_ID = "/restconf/operational/opendaylight-inventory:nodes/node/ocp:"
+REST_GET_PARAM = "/restconf/operations/ocp-service:get-param-nb"
index 7f93fdef9341ba074fbcd8d31f40fff9feea1d27..d73cef7bab6f276eb10fd8ba90cbd36d85a150a3 100644 (file)
@@ -1,13 +1,13 @@
-RPC_SEND_BARRIER_DATA = '''<input xmlns="urn:opendaylight:flow:transaction">
+RPC_SEND_BARRIER_DATA = """<input xmlns="urn:opendaylight:flow:transaction">
     <node xmlns:inv="urn:opendaylight:inventory">/inv:nodes/inv:node[inv:id="openflow:1"]</node>
-</input>'''
+</input>"""
 
-RPC_SEND_ECHO_DATA = '''<input xmlns="urn:opendaylight:echo:service">
+RPC_SEND_ECHO_DATA = """<input xmlns="urn:opendaylight:echo:service">
     <node xmlns:inv="urn:opendaylight:inventory">/inv:nodes/inv:node[inv:id="openflow:1"]</node>
     <data>aGVsbG8gYmFzZSA2NC4gaW5wdXQ=</data>
-</input>'''
+</input>"""
 
-RPC_SEND_UPDATE_TABLE_DATA = '''<input xmlns="urn:opendaylight:table:service">
+RPC_SEND_UPDATE_TABLE_DATA = """<input xmlns="urn:opendaylight:table:service">
   <node xmlns:inv="urn:opendaylight:inventory">/inv:nodes/inv:node[inv:id="openflow:1"]</node>
   <updated-table>
     <table-features>  <!-- model opendaylight-table-types, grouping table-features  -->
@@ -19,4 +19,4 @@ RPC_SEND_UPDATE_TABLE_DATA = '''<input xmlns="urn:opendaylight:table:service">
       <config>DEPRECATED-MASK</config>
     </table-features>
   </updated-table>
-</input>'''
+</input>"""
index f0f9b4c4616a9b5e2cd8467df72fcddafd013470..366e681232b7d0930a74ab16e2f6f13ed32a57a7 100644 (file)
@@ -38,8 +38,8 @@ class HostTopo(Topo):
 
     def __init__(self, host_suffix=1, hosts_per_switch=1, **opts):
         Topo.__init__(self, **opts)
-        switch = self.addSwitch('s%s' % host_suffix)
+        switch = self.addSwitch("s%s" % host_suffix)
         add_hosts(self, switch, hosts_per_switch, host_suffix)
 
 
-topos = {'host': HostTopo}
+topos = {"host": HostTopo}
index 7d9e8ad47adb93a1303503e07431eb5148bbcb88..c513f79153d696c69db90b92d517415a0d9e46f4 100644 (file)
@@ -36,7 +36,9 @@ def get_variables(mininet_ip):
     # The whole list: default_json, updated_json, updated_default_json, updated_updated_json.
     # Oh, and the state without mock-pcc connected is off_json.
     # off_json has '{}' substring and no variable data, so here it is as a special case:
-    variables['off_json'] = '''{
+    variables[
+        "off_json"
+    ] = """{
  "topology": [
   {
    "topology-id": "pcep-topology",
@@ -45,12 +47,13 @@ def get_variables(mininet_ip):
    }
   }
  ]
-}'''
+}"""
     # Ok, other _json strings will have a more regular structure and some variable data,
     # so we will be using templates heavily.
     # First off, there is a segment describing the PCC which contains the IP address but is otherwise constant.
     # So the top-level template will look like this:
-    json_templ = Template('''{
+    json_templ = Template(
+        """{
  "network-topology-pcep:path-computation-client": {
   "ip-address": "$IP",
   "reported-lsp": [$LSPS
@@ -63,7 +66,8 @@ def get_variables(mininet_ip):
    }
   }
  }
-}''')
+}"""
+    )
     # The _json variables will differ only in $LSPS, but $IP will be present inside.
     # Thus, the $IP substitution will come last, and any auxiliary substitutions before this final one
     # will have to use safe_substitute().
@@ -73,7 +77,8 @@ def get_variables(mininet_ip):
     # Discussion about delegated and instantiated implies that $LSPS is either a single delegated LSP
     # or a pair of delegated and instantiated LSPs (separated by a comma), in the appropriate state.
     # Of course, one LSP always follows a structure, for which here is the template:
-    lsp_templ = Template('''
+    lsp_templ = Template(
+        """
    {
     "name": "$NAME",
     "path": [
@@ -112,134 +117,149 @@ def get_variables(mininet_ip):
       }
      }
     ]
-   }''')
+   }"""
+    )
     # IDs were already covered, and IP will be set last. Now, $NAME.
     # Pcc-mock uses a fixed naming scheme for delegated tunnels, so one more template can be written,
     # but it is so simple we can write just the one-line code instead:
-    delegated_name = 'pcc_' + mininet_ip + '_tunnel_1'  # 1 == ID
+    delegated_name = "pcc_" + mininet_ip + "_tunnel_1"  # 1 == ID
     # For the instantiated tunnel, the user is free to specify anything; even characters such as \u0000 work.
     # But as we need to plug the name into XML, let us try something more friendly:
-    instantiated_name = 'Instantiated tunnel'  # the space is only a somewhat evil character :)
+    instantiated_name = (
+        "Instantiated tunnel"  # the space is only a somewhat evil character :)
+    )
     # What is CODE? The NAME in base64 encoding (without a trailing newline):
-    delegated_name_bytes = delegated_name.encode('ascii')
+    delegated_name_bytes = delegated_name.encode("ascii")
     delegated_code_encoded = base64.b64encode(delegated_name_bytes)
-    delegated_code = delegated_code_encoded.decode('ascii')
-    instantiated_name_bytes = instantiated_name.encode('ascii')
+    delegated_code = delegated_code_encoded.decode("ascii")
+    instantiated_name_bytes = instantiated_name.encode("ascii")
     instantiated_code_encoded = base64.b64encode(instantiated_name_bytes)
-    instantiated_code = instantiated_code_encoded.decode('ascii')
+    instantiated_code = instantiated_code_encoded.decode("ascii")
 
     # The remaining segment is HOPS, and that is the place where default and updated states differ.
     # Once again, there is a template for a single hop:
-    hop_templ = Template('''
+    hop_templ = Template(
+        """
         {
          "ip-prefix": {
           "ip-prefix": "$HOPIP/32"
          },
          "loose": false
-        }''')
+        }"""
+    )
     # The low-to-high part of the V comes now; it is just substituting and concatenating.
     # Hops:
-    final_hop = hop_templ.substitute({'HOPIP': '1.1.1.1'})
-    update_hop = hop_templ.substitute({'HOPIP': '2.2.2.2'})
-    both_hops = update_hop + ',' + final_hop
+    final_hop = hop_templ.substitute({"HOPIP": "1.1.1.1"})
+    update_hop = hop_templ.substitute({"HOPIP": "2.2.2.2"})
+    both_hops = update_hop + "," + final_hop
     # Lsps:
-    default_lsp_templ = Template(lsp_templ.safe_substitute({'HOPS': final_hop}))
-    updated_lsp_templ = Template(lsp_templ.safe_substitute({'HOPS': both_hops}))
-    repl_dict = {'NAME': delegated_name, 'ID': '1', 'CODE': delegated_code, 'CREATED': 'false'}
+    default_lsp_templ = Template(lsp_templ.safe_substitute({"HOPS": final_hop}))
+    updated_lsp_templ = Template(lsp_templ.safe_substitute({"HOPS": both_hops}))
+    repl_dict = {
+        "NAME": delegated_name,
+        "ID": "1",
+        "CODE": delegated_code,
+        "CREATED": "false",
+    }
     delegated_default_lsp = default_lsp_templ.safe_substitute(repl_dict)
     delegated_updated_lsp = updated_lsp_templ.safe_substitute(repl_dict)
-    repl_dict = {'NAME': instantiated_name, 'ID': '2', 'CODE': instantiated_code, 'CREATED': 'true'}
+    repl_dict = {
+        "NAME": instantiated_name,
+        "ID": "2",
+        "CODE": instantiated_code,
+        "CREATED": "true",
+    }
     instantiated_default_lsp = default_lsp_templ.safe_substitute(repl_dict)
     instantiated_updated_lsp = updated_lsp_templ.safe_substitute(repl_dict)
     # Json templates (without IP set).
-    repl_dict = {'LSPS': delegated_default_lsp}
+    repl_dict = {"LSPS": delegated_default_lsp}
     default_json_templ = Template(json_templ.safe_substitute(repl_dict))
-    repl_dict = {'LSPS': delegated_updated_lsp}
+    repl_dict = {"LSPS": delegated_updated_lsp}
     updated_json_templ = Template(json_templ.safe_substitute(repl_dict))
-    repl_dict = {'LSPS': delegated_updated_lsp + ',' + instantiated_default_lsp}
+    repl_dict = {"LSPS": delegated_updated_lsp + "," + instantiated_default_lsp}
     updated_default_json_templ = Template(json_templ.safe_substitute(repl_dict))
-    repl_dict = {'LSPS': delegated_updated_lsp + ',' + instantiated_updated_lsp}
+    repl_dict = {"LSPS": delegated_updated_lsp + "," + instantiated_updated_lsp}
     updated_updated_json_templ = Template(json_templ.safe_substitute(repl_dict))
     # Final json variables.
-    repl_dict = {'IP': mininet_ip}
-    variables['default_json'] = default_json_templ.substitute(repl_dict)
-    variables['updated_json'] = updated_json_templ.substitute(repl_dict)
-    variables['updated_default_json'] = updated_default_json_templ.substitute(repl_dict)
-    variables['updated_updated_json'] = updated_updated_json_templ.substitute(repl_dict)
+    repl_dict = {"IP": mininet_ip}
+    variables["default_json"] = default_json_templ.substitute(repl_dict)
+    variables["updated_json"] = updated_json_templ.substitute(repl_dict)
+    variables["updated_default_json"] = updated_default_json_templ.substitute(repl_dict)
+    variables["updated_updated_json"] = updated_updated_json_templ.substitute(repl_dict)
     # ### Pcep operations XML data.
     # There are three operations, so let us just write templates from information at
     # https://wiki.opendaylight.org/view/BGP_LS_PCEP:Programmer_Guide#Tunnel_Management_for_draft-ietf-pce-stateful-pce-07_and_draft-ietf-pce-pce-initiated-lsp-00
     # _xml describes content type and also distinguishes from similarly named _json strings.
     add_xml_templ = Template(
         '<input xmlns="urn:opendaylight:params:xml:ns:yang:topology:pcep">\n'
-        ' <node>pcc://$IP</node>\n'
-        ' <name>$NAME</name>\n'
+        " <node>pcc://$IP</node>\n"
+        " <name>$NAME</name>\n"
         ' <network-topology-ref xmlns:topo="urn:TBD:params:xml:ns:yang:network-topology">'
         '/topo:network-topology/topo:topology[topo:topology-id="pcep-topology"]'
-        '</network-topology-ref>\n'
-        ' <arguments>\n'
+        "</network-topology-ref>\n"
+        " <arguments>\n"
         '  <lsp xmlns="urn:opendaylight:params:xml:ns:yang:pcep:ietf:stateful">\n'
-        '   <delegate>true</delegate>\n'
-        '   <administrative>true</administrative>\n'
-        '  </lsp>\n'
-        '  <endpoints-obj>\n'
-        '   <ipv4>\n'
-        '    <source-ipv4-address>$IP</source-ipv4-address>\n'
-        '    <destination-ipv4-address>1.1.1.1</destination-ipv4-address>\n'
-        '   </ipv4>\n'
-        '  </endpoints-obj>\n'
-        '  <ero>\n'
-        '   <subobject>\n'
-        '    <loose>false</loose>\n'
-        '    <ip-prefix><ip-prefix>1.1.1.1/32</ip-prefix></ip-prefix>\n'
-        '   </subobject>\n'
-        '  </ero>\n'
-        ' </arguments>\n'
-        '</input>\n'
+        "   <delegate>true</delegate>\n"
+        "   <administrative>true</administrative>\n"
+        "  </lsp>\n"
+        "  <endpoints-obj>\n"
+        "   <ipv4>\n"
+        "    <source-ipv4-address>$IP</source-ipv4-address>\n"
+        "    <destination-ipv4-address>1.1.1.1</destination-ipv4-address>\n"
+        "   </ipv4>\n"
+        "  </endpoints-obj>\n"
+        "  <ero>\n"
+        "   <subobject>\n"
+        "    <loose>false</loose>\n"
+        "    <ip-prefix><ip-prefix>1.1.1.1/32</ip-prefix></ip-prefix>\n"
+        "   </subobject>\n"
+        "  </ero>\n"
+        " </arguments>\n"
+        "</input>\n"
     )
     update_xml_templ = Template(
         '<input xmlns="urn:opendaylight:params:xml:ns:yang:topology:pcep">\n'
-        ' <node>pcc://$IP</node>\n'
-        ' <name>$NAME</name>\n'
+        " <node>pcc://$IP</node>\n"
+        " <name>$NAME</name>\n"
         ' <network-topology-ref xmlns:topo="urn:TBD:params:xml:ns:yang:network-topology">'
         '/topo:network-topology/topo:topology[topo:topology-id="pcep-topology"]'
-        '</network-topology-ref>\n'
-        ' <arguments>\n'
+        "</network-topology-ref>\n"
+        " <arguments>\n"
         '  <lsp xmlns="urn:opendaylight:params:xml:ns:yang:pcep:ietf:stateful">\n'
-        '   <delegate>true</delegate>\n'
-        '   <administrative>true</administrative>\n'
-        '  </lsp>\n'
-        '  <ero>\n'
-        '   <subobject>\n'
-        '    <loose>false</loose>\n'
-        '    <ip-prefix><ip-prefix>2.2.2.2/32</ip-prefix></ip-prefix>\n'
-        '   </subobject>\n'
-        '   <subobject>\n'
-        '    <loose>false</loose>\n'
-        '    <ip-prefix><ip-prefix>1.1.1.1/32</ip-prefix></ip-prefix>\n'
-        '   </subobject>\n'
-        '  </ero>\n'
-        ' </arguments>\n'
-        '</input>\n'
+        "   <delegate>true</delegate>\n"
+        "   <administrative>true</administrative>\n"
+        "  </lsp>\n"
+        "  <ero>\n"
+        "   <subobject>\n"
+        "    <loose>false</loose>\n"
+        "    <ip-prefix><ip-prefix>2.2.2.2/32</ip-prefix></ip-prefix>\n"
+        "   </subobject>\n"
+        "   <subobject>\n"
+        "    <loose>false</loose>\n"
+        "    <ip-prefix><ip-prefix>1.1.1.1/32</ip-prefix></ip-prefix>\n"
+        "   </subobject>\n"
+        "  </ero>\n"
+        " </arguments>\n"
+        "</input>\n"
     )
     remove_xml_templ = Template(
         '<input xmlns="urn:opendaylight:params:xml:ns:yang:topology:pcep">\n'
-        ' <node>pcc://$IP</node>\n'
-        ' <name>$NAME</name>\n'
+        " <node>pcc://$IP</node>\n"
+        " <name>$NAME</name>\n"
         ' <network-topology-ref xmlns:topo="urn:TBD:params:xml:ns:yang:network-topology">'
         '/topo:network-topology/topo:topology[topo:topology-id="pcep-topology"]'
-        '</network-topology-ref>\n'
-        '</input>\n'
+        "</network-topology-ref>\n"
+        "</input>\n"
     )
     # The operations can be applied to either the delegated or the instantiated tunnel; NAME is the only distinguishing value.
     # Also, the final IP substitution can be done here.
-    repl_dict = {'IP': mininet_ip}
-    repl_dict['NAME'] = delegated_name
-    variables['update_delegated_xml'] = update_xml_templ.substitute(repl_dict)
-    variables['remove_delegated_xml'] = remove_xml_templ.substitute(repl_dict)
-    repl_dict['NAME'] = instantiated_name
-    variables['add_instantiated_xml'] = add_xml_templ.substitute(repl_dict)
-    variables['update_instantiated_xml'] = update_xml_templ.substitute(repl_dict)
-    variables['remove_instantiated_xml'] = remove_xml_templ.substitute(repl_dict)
+    repl_dict = {"IP": mininet_ip}
+    repl_dict["NAME"] = delegated_name
+    variables["update_delegated_xml"] = update_xml_templ.substitute(repl_dict)
+    variables["remove_delegated_xml"] = remove_xml_templ.substitute(repl_dict)
+    repl_dict["NAME"] = instantiated_name
+    variables["add_instantiated_xml"] = add_xml_templ.substitute(repl_dict)
+    variables["update_instantiated_xml"] = update_xml_templ.substitute(repl_dict)
+    variables["remove_instantiated_xml"] = remove_xml_templ.substitute(repl_dict)
     # All variables ready.
     return variables
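
Two mechanisms carry get_variables above: the NAME/CODE pair is plain Base64, and safe_substitute() fills the known placeholders while leaving $IP intact for the final substitute() pass. A compact sketch with an illustrative address and a toy template (not the real JSON body):

    import base64
    from string import Template

    name = "pcc_1.2.3.4_tunnel_1"  # illustrative mininet_ip
    code = base64.b64encode(name.encode("ascii")).decode("ascii")
    # code == "cGNjXzEuMi4zLjRfdHVubmVsXzE="

    templ = Template("$NAME/$CODE/$IP")
    partial = Template(templ.safe_substitute({"NAME": name, "CODE": code}))
    final = partial.substitute({"IP": "1.2.3.4"})  # $IP is resolved last
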
index 2aa7f0c2266a3ec69fe1e04481e09038efc3eb5b..41a5c1ad03d79f8e00046eebd76114a2fead3880 100644 (file)
@@ -1,23 +1,23 @@
 sfc_data_models = [
-    'config/ietf-access-control-list:access-lists',
-    'config/ietf-interfaces:interfaces',
-    'config/interface-service-bindings:service-bindings',
-    'config/network-topology:network-topology/topology/ovsdb:1',
-    'config/opendaylight-inventory:nodes',
-    'config/rendered-service-path:rendered-service-path',
-    'config/service-function-chain:service-function-chains',
-    'config/service-function-classifier:service-function-classifiers',
-    'config/service-function-forwarder:service-function-forwarders',
-    'config/service-function-group:service-function-groups',
-    'config/service-function-mapping:sff-dpl-by-sf-dpl-mappings',
-    'config/service-function-path:service-function-paths',
-    'config/service-function-path-metadata:service-function-metadata',
-    'config/service-function-type:service-function-types',
-    'config/service-function:service-functions',
-    'config/sfc-of-renderer:sfc-of-renderer-config',
-    'operational/ietf-interfaces:interfaces-state',
-    'operational/network-topology:network-topology/topology/ovsdb:1',
-    'operational/opendaylight-inventory:nodes',
-    'operational/rendered-service-path:rendered-service-paths',
-    'operational/service-function-path:service-function-paths-state'
+    "config/ietf-access-control-list:access-lists",
+    "config/ietf-interfaces:interfaces",
+    "config/interface-service-bindings:service-bindings",
+    "config/network-topology:network-topology/topology/ovsdb:1",
+    "config/opendaylight-inventory:nodes",
+    "config/rendered-service-path:rendered-service-path",
+    "config/service-function-chain:service-function-chains",
+    "config/service-function-classifier:service-function-classifiers",
+    "config/service-function-forwarder:service-function-forwarders",
+    "config/service-function-group:service-function-groups",
+    "config/service-function-mapping:sff-dpl-by-sf-dpl-mappings",
+    "config/service-function-path:service-function-paths",
+    "config/service-function-path-metadata:service-function-metadata",
+    "config/service-function-type:service-function-types",
+    "config/service-function:service-functions",
+    "config/sfc-of-renderer:sfc-of-renderer-config",
+    "operational/ietf-interfaces:interfaces-state",
+    "operational/network-topology:network-topology/topology/ovsdb:1",
+    "operational/opendaylight-inventory:nodes",
+    "operational/rendered-service-path:rendered-service-paths",
+    "operational/service-function-path:service-function-paths-state",
 ]
index 6d0ce2fa91365ecda167a8bd35e7d9bfd4946296..0ba35563fdcf22e104fb7b431e74012f652b194b 100644 (file)
@@ -26,21 +26,24 @@ __email__ = "vrpolak@cisco.com"
 
 # FIXME: Migrate values shared by other suites to a separate Python module.
 
+
 def get_variables(mininet_ip):
     """Return dict of variables for the given IPv4 address of Mininet VM."""
     # TODO: Document in 'V' fashion, as in pcepuser/variables.py, using more systematic local variable names.
     # Dict of variables to return, starts empty and grows as function proceeds.
     variables = {}
     # Given mininet_ip, this will be the symbolic name of the tunnel under test.
-    tunnelname = 'pcc_' + mininet_ip + '_tunnel_1'
+    tunnelname = "pcc_" + mininet_ip + "_tunnel_1"
     # Base64 code for the symbolic name, as that is what is present in the datastore.
-    tunnelname_bytes = tunnelname.encode('ascii')
+    tunnelname_bytes = tunnelname.encode("ascii")
     pathcode_encoded = base64.b64encode(tunnelname_bytes)
-    pathcode = pathcode_encoded.decode('ascii')
-    variables['pcc_name'] = tunnelname
-    variables['pcc_name_code'] = pathcode
+    pathcode = pathcode_encoded.decode("ascii")
+    variables["pcc_name"] = tunnelname
+    variables["pcc_name_code"] = pathcode
     # JSON response when pcep-topology is ready but no PCC is connected.
-    variables['offjson'] = '''{
+    variables[
+        "offjson"
+    ] = """{
  "topology": [
   {
    "topology-id": "pcep-topology",
@@ -49,9 +52,10 @@ def get_variables(mininet_ip):
    }
   }
  ]
-}'''
+}"""
     # Template of JSON response with pcep-topology seeing 1 PCC 1 LSP.
-    onjsontempl = Template('''{
+    onjsontempl = Template(
+        """{
  "topology": [
   {
    "node": [
@@ -122,60 +126,79 @@ def get_variables(mininet_ip):
    }
   }
  ]
-}''')
+}"""
+    )
     # Dictionary giving the values for the placeholders.
-    repl_dict = {'IP': mininet_ip, 'NAME': tunnelname, 'CODE': pathcode}
+    repl_dict = {"IP": mininet_ip, "NAME": tunnelname, "CODE": pathcode}
     # The finalized JSON.
-    variables['onjson'] = onjsontempl.substitute(repl_dict)
+    variables["onjson"] = onjsontempl.substitute(repl_dict)
     # The following strings are XML data.
     # See https://wiki.opendaylight.org/view/BGP_LS_PCEP:TCP_MD5_Guide#RESTCONF_Configuration
     # For curl, the string is suitable to become a -d argument only after
     # replacing ' -> '"'"' and enclosing in single quotes.
-    variables['key_access_module'] = '''<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+    variables[
+        "key_access_module"
+    ] = """<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:jni:cfg">x:native-key-access-factory</type>
  <name>global-key-access-factory</name>
-</module>'''
-    variables['key_access_service'] = '''<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+</module>"""
+    variables[
+        "key_access_service"
+    ] = """<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:cfg">x:key-access-factory</type>
  <instance>
   <name>global-key-access-factory</name>
   <provider>/modules/module[type='native-key-access-factory'][name='global-key-access-factory']</provider>
  </instance>
-</service>'''
-    variables['client_channel_module'] = '''<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+</service>"""
+    variables[
+        "client_channel_module"
+    ] = """<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">x:md5-client-channel-factory</type>
  <name>md5-client-channel-factory</name>
  <key-access-factory xmlns="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">
   <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:cfg">x:key-access-factory</type>
   <name>global-key-access-factory</name>
  </key-access-factory>
-</module>'''
-    variables['client_channel_service'] = '''<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+</module>"""
+    variables[
+        "client_channel_service"
+    ] = """<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">x:md5-channel-factory</type>
  <instance>
   <name>md5-client-channel-factory</name>
   <provider>/modules/module[type='md5-client-channel-factory'][name='md5-client-channel-factory']</provider>
  </instance>
-</service>'''
-    variables['server_channel_module'] = '''<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">'''
+</service>"""
+    variables[
+        "server_channel_module"
+    ] = """<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">"""
     # What is your favourite way to concatenate strings without resembling a tuple?
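+    # (One answer: adjacent string literals concatenate implicitly,
+    # e.g. "ab" "cd" == "abcd", with no operator at all.)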
-    variables['server_channel_module'] += '''prefix:md5-server-channel-factory-impl</type>
+    variables[
+        "server_channel_module"
+    ] += """prefix:md5-server-channel-factory-impl</type>
  <name>md5-server-channel-factory</name>
  <server-key-access-factory xmlns="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">
   <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:cfg">x:key-access-factory</type>
   <name>global-key-access-factory</name>
  </server-key-access-factory>
-</module>'''
-    variables['server_channel_service'] = '''<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">'''
-    variables['server_channel_service'] += '''prefix:md5-server-channel-factory</type>
+</module>"""
+    variables[
+        "server_channel_service"
+    ] = """<service xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">"""
+    variables[
+        "server_channel_service"
+    ] += """prefix:md5-server-channel-factory</type>
  <instance>
   <name>md5-server-channel-factory</name>
   <provider>/modules/module[type='md5-server-channel-factory-impl'][name='md5-server-channel-factory']</provider>
  </instance>
-</service>'''
-    variables['pcep_dispatcher_module'] = '''<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+</service>"""
+    variables[
+        "pcep_dispatcher_module"
+    ] = """<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:pcep:impl">x:pcep-dispatcher-impl</type>
  <name>global-pcep-dispatcher</name>
  <md5-channel-factory xmlns="urn:opendaylight:params:xml:ns:yang:controller:pcep:impl">
@@ -186,27 +209,29 @@ def get_variables(mininet_ip):
   <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:tcpmd5:netty:cfg">x:md5-server-channel-factory</type>
   <name>md5-server-channel-factory</name>
  </md5-server-channel-factory>
-</module>'''
+</module>"""
     # Template to set password.
-    passwd_templ = Template('''<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+    passwd_templ = Template(
+        """<module xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
  <type xmlns:x="urn:opendaylight:params:xml:ns:yang:controller:pcep:topology:provider">x:pcep-topology-provider</type>
  <name>pcep-topology</name>
  <client xmlns="urn:opendaylight:params:xml:ns:yang:controller:pcep:topology:provider">
   <address xmlns="urn:opendaylight:params:xml:ns:yang:controller:pcep:topology:provider">$IP</address>
 $PASSWD </client>
-</module>''')
+</module>"""
+    )
     # We use three template instantiations. No password:
-    repl_dict = {'IP': mininet_ip, 'PASSWD': ''}
-    variables['no_passwd_module'] = passwd_templ.substitute(repl_dict)
-    changeme = '''  <password>changeme</password>
-'''
+    repl_dict = {"IP": mininet_ip, "PASSWD": ""}
+    variables["no_passwd_module"] = passwd_templ.substitute(repl_dict)
+    changeme = """  <password>changeme</password>
+"""
     # wrong password
-    repl_dict = {'IP': mininet_ip, 'PASSWD': changeme}
-    variables['passwd_changeme_module'] = passwd_templ.substitute(repl_dict)
+    repl_dict = {"IP": mininet_ip, "PASSWD": changeme}
+    variables["passwd_changeme_module"] = passwd_templ.substitute(repl_dict)
     # and correct password.
-    topsecret = '''  <password>topsecret</password>
-'''
-    repl_dict = {'IP': mininet_ip, 'PASSWD': topsecret}
-    variables['passwd_topsecret_module'] = passwd_templ.substitute(repl_dict)
+    topsecret = """  <password>topsecret</password>
+"""
+    repl_dict = {"IP": mininet_ip, "PASSWD": topsecret}
+    variables["passwd_topsecret_module"] = passwd_templ.substitute(repl_dict)
     # All variables set, return dict to Robot.
     return variables
index a5691abae8f4d1dfdd0185519490e89da5319285..92ceb10bf66a7c9b783df8b641811433a13f69e8 100644 (file)
@@ -2,23 +2,31 @@
 Definitions of target field paths widely used throughout the Topoprocessing suites.
 """
 # NT target fields
-ISIS_NODE_TE_ROUTER_ID_IPV4 = ('l3-unicast-igp-topology:igp-node-attributes'
-                               '/isis-topology:isis-node-attributes'
-                               '/isis-topology:ted'
-                               '/isis-topology:te-router-id-ipv4')
-ISIS_NODE_TE_ROUTER_ID_IPV6 = ('l3-unicast-igp-topology:igp-node-attributes'
-                               '/isis-topology:isis-node-attributes'
-                               '/isis-topology:ted'
-                               '/isis-topology:te-router-id-ipv6')
-IGP_LINK_METRIC = 'l3-unicast-igp-topology:igp-link-attributes/l3-unicast-igp-topology:metric'
-IGP_LINK_NAME = 'l3-unicast-igp-topology:igp-link-attributes/l3-unicast-igp-topology:name'
-OVSDB_OFPORT = 'ovsdb:ofport'
-OVSDB_OVS_VERSION = 'ovsdb:ovs-version'
-OVSDB_TP_NAME = 'ovsdb:name'
+ISIS_NODE_TE_ROUTER_ID_IPV4 = (
+    "l3-unicast-igp-topology:igp-node-attributes"
+    "/isis-topology:isis-node-attributes"
+    "/isis-topology:ted"
+    "/isis-topology:te-router-id-ipv4"
+)
+ISIS_NODE_TE_ROUTER_ID_IPV6 = (
+    "l3-unicast-igp-topology:igp-node-attributes"
+    "/isis-topology:isis-node-attributes"
+    "/isis-topology:ted"
+    "/isis-topology:te-router-id-ipv6"
+)
+IGP_LINK_METRIC = (
+    "l3-unicast-igp-topology:igp-link-attributes/l3-unicast-igp-topology:metric"
+)
+IGP_LINK_NAME = (
+    "l3-unicast-igp-topology:igp-link-attributes/l3-unicast-igp-topology:name"
+)
+OVSDB_OFPORT = "ovsdb:ofport"
+OVSDB_OVS_VERSION = "ovsdb:ovs-version"
+OVSDB_TP_NAME = "ovsdb:name"
 
 # Inventory target fields
-OPENFLOW_NODE_IP_ADDRESS = 'flow-node-inventory:ip-address'
-OPENFLOW_NODE_SERIAL_NUMBER = 'flow-node-inventory:serial-number'
-OPENFLOW_NODE_CONNECTOR_PORT_NUMBER = 'flow-node-inventory:port-number'
-OPENFLOW_NODE_CONNECTOR_MAXIMUM_SPEED = 'flow-node-inventory:maximum-speed'
-OPENFLOW_NODE_CONNECTOR_NAME = 'flow-node-inventory:name'
+OPENFLOW_NODE_IP_ADDRESS = "flow-node-inventory:ip-address"
+OPENFLOW_NODE_SERIAL_NUMBER = "flow-node-inventory:serial-number"
+OPENFLOW_NODE_CONNECTOR_PORT_NUMBER = "flow-node-inventory:port-number"
+OPENFLOW_NODE_CONNECTOR_MAXIMUM_SPEED = "flow-node-inventory:maximum-speed"
+OPENFLOW_NODE_CONNECTOR_NAME = "flow-node-inventory:name"
index 8a52d71bfa8978fb4e7d60fd190ae345013e6a55..b679acf8066563417f24ee8d91e07ffb59d71e6e 100644 (file)
@@ -1,4 +1,4 @@
-NETWORK_UNDERLAY_TOPOLOGY_1 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_1 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology"
@@ -159,9 +159,9 @@ NETWORK_UNDERLAY_TOPOLOGY_1 = '''<topology
                                     <igp:metric>14</igp:metric>
                                 </igp:igp-link-attributes>
                             </link>
-                        </topology>'''
+                        </topology>"""
 
-NETWORK_UNDERLAY_TOPOLOGY_2 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_2 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology"
@@ -262,9 +262,9 @@ NETWORK_UNDERLAY_TOPOLOGY_2 = '''<topology
                                     </igp:igp-termination-point-attributes>
                                 </termination-point>
                             </node>
-                        </topology>'''
+                        </topology>"""
 
-NETWORK_UNDERLAY_TOPOLOGY_3 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_3 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology">
@@ -337,9 +337,9 @@ NETWORK_UNDERLAY_TOPOLOGY_3 = '''<topology
                                     </isis:isis-node-attributes>
                                 </igp:igp-node-attributes>
                             </node>
-                        </topology>'''
+                        </topology>"""
 
-NETWORK_UNDERLAY_TOPOLOGY_4 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_4 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology">
@@ -394,9 +394,9 @@ NETWORK_UNDERLAY_TOPOLOGY_4 = '''<topology
                                     </isis:isis-node-attributes>
                                 </igp:igp-node-attributes>
                             </node>
-                        </topology>'''
+                        </topology>"""
 
-NETWORK_UNDERLAY_TOPOLOGY_5 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_5 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology"
@@ -508,9 +508,9 @@ NETWORK_UNDERLAY_TOPOLOGY_5 = '''<topology
                                     </isis:isis-node-attributes>
                                 </igp:igp-node-attributes>
                             </node>
-                        </topology>'''
+                        </topology>"""
 
-NETWORK_UNDERLAY_TOPOLOGY_6 = '''<topology
+NETWORK_UNDERLAY_TOPOLOGY_6 = """<topology
                                 xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology">
@@ -617,9 +617,9 @@ NETWORK_UNDERLAY_TOPOLOGY_6 = '''<topology
                                     <igp:metric>12</igp:metric>
                                 </igp:igp-link-attributes>
                             </link>
-                        </topology>'''
+                        </topology>"""
 
-OPENFLOW_UNDERLAY_NODES = '''
+OPENFLOW_UNDERLAY_NODES = """
 <nodes
     xmlns="urn:opendaylight:inventory"
     xmlns:flov-inv="urn:opendaylight:flow:inventory">
@@ -863,9 +863,9 @@ OPENFLOW_UNDERLAY_NODES = '''
         </node-connector>
     </node>
 </nodes>
-'''
+"""
 
-OPENFLOW_UNDERLAY_TOPOLOGY_1 = '''
+OPENFLOW_UNDERLAY_TOPOLOGY_1 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -964,9 +964,9 @@ OPENFLOW_UNDERLAY_TOPOLOGY_1 = '''
         </termination-point>
     </node>
 </topology>
-'''
+"""
 
-OPENFLOW_UNDERLAY_TOPOLOGY_2 = '''
+OPENFLOW_UNDERLAY_TOPOLOGY_2 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -993,9 +993,9 @@ OPENFLOW_UNDERLAY_TOPOLOGY_2 = '''
         <inventory-topo:inventory-node-ref>/inventory:nodes/inventory:node[inventory:id="openflow:10"]</inventory-topo:inventory-node-ref>
     </node>
 </topology>
-'''
+"""
 
-OPENFLOW_UNDERLAY_TOPOLOGY_3 = '''
+OPENFLOW_UNDERLAY_TOPOLOGY_3 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -1075,8 +1075,8 @@ OPENFLOW_UNDERLAY_TOPOLOGY_3 = '''
         </igp:igp-link-attributes>
     </link>
 </topology>
-'''
-OPENFLOW_UNDERLAY_TOPOLOGY_4 = '''
+"""
+OPENFLOW_UNDERLAY_TOPOLOGY_4 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -1103,9 +1103,9 @@ OPENFLOW_UNDERLAY_TOPOLOGY_4 = '''
         <inventory-topo:inventory-node-ref>/inventory:nodes/inventory:node[inventory:id="openflow:20"]</inventory-topo:inventory-node-ref>
     </node>
 </topology>
-'''
+"""
 
-OPENFLOW_UNDERLAY_TOPOLOGY_5 = '''
+OPENFLOW_UNDERLAY_TOPOLOGY_5 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -1162,9 +1162,9 @@ OPENFLOW_UNDERLAY_TOPOLOGY_5 = '''
         </termination-point>
     </node>
 </topology>
-'''
+"""
 
-OPENFLOW_UNDERLAY_TOPOLOGY_6 = '''
+OPENFLOW_UNDERLAY_TOPOLOGY_6 = """
 <topology
         xmlns="urn:TBD:params:xml:ns:yang:network-topology"
         xmlns:inventory="urn:opendaylight:inventory"
@@ -1191,4 +1191,4 @@ OPENFLOW_UNDERLAY_TOPOLOGY_6 = '''
         <inventory-topo:inventory-node-ref>/inventory:nodes/inventory:node[inventory:id="openflow:10"]</inventory-topo:inventory-node-ref>
     </node>
 </topology>
-'''
+"""
index f3220c9cd55f789fc5a68ee61f3720eb9f6d0cc2..82fcf553a5e144b31533f7d4c40299cf07cbda53 100644 (file)
@@ -1,4 +1,4 @@
-UNIFICATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
+UNIFICATION_NT = """<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
                         <n:topology-id>topo:1</n:topology-id>
                         <correlations>
                             <output-model>{output-model}</output-model>
@@ -23,9 +23,9 @@ UNIFICATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xm
                                 </aggregation>
                             </correlation>
                         </correlations>
-                    </n:topology>'''
+                    </n:topology>"""
 
-UNIFICATION_NT_AGGREGATE_INSIDE = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
+UNIFICATION_NT_AGGREGATE_INSIDE = """<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
                         <n:topology-id>topo:1</n:topology-id>
                         <correlations>
                             <output-model>{output-model}</output-model>
@@ -44,9 +44,9 @@ UNIFICATION_NT_AGGREGATE_INSIDE = '''<n:topology xmlns="urn:opendaylight:topolog
                                 </aggregation>
                             </correlation>
                         </correlations>
-                    </n:topology>'''
+                    </n:topology>"""
 
-UNIFICATION_FILTRATION_NT_AGGREGATE_INSIDE = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
+UNIFICATION_FILTRATION_NT_AGGREGATE_INSIDE = """<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
                         <n:topology-id>topo:1</n:topology-id>
                         <correlations>
                             <output-model>{output-model}</output-model>
@@ -70,9 +70,9 @@ UNIFICATION_FILTRATION_NT_AGGREGATE_INSIDE = '''<n:topology xmlns="urn:opendayli
                                 </filtration>
                             </correlation>
                         </correlations>
-                    </n:topology>'''
+                    </n:topology>"""
 
-UNIFICATION_FILTRATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
+UNIFICATION_FILTRATION_NT = """<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
                         <n:topology-id>topo:1</n:topology-id>
                         <correlations>
                             <output-model>{output-model}</output-model>
@@ -103,9 +103,9 @@ UNIFICATION_FILTRATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:corr
                                 </filtration>
                             </correlation>
                         </correlations>
-                    </n:topology>'''
+                    </n:topology>"""
 
-FILTRATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
+FILTRATION_NT = """<n:topology xmlns="urn:opendaylight:topology:correlation" xmlns:n="urn:TBD:params:xml:ns:yang:network-topology">
                     <n:topology-id>topo:1</n:topology-id>
                     <correlations>
                         <output-model>{output-model}</output-model>
@@ -119,28 +119,28 @@ FILTRATION_NT = '''<n:topology xmlns="urn:opendaylight:topology:correlation" xml
                             </filtration>
                         </correlation>
                     </correlations>
-                </n:topology>'''
+                </n:topology>"""
 
-APPLY_FILTERS = '''
+APPLY_FILTERS = """
 <apply-filters>{filter-id}</apply-filters>
-'''
+"""
 
-TARGET_FIELD = '''
+TARGET_FIELD = """
 <target-field>
     <target-field-path>{target-field-path}</target-field-path>
     <matching-key>{matching-key}</matching-key>
 </target-field>
-'''
+"""
 
-SCRIPTING = '''
+SCRIPTING = """
 <scripting>
     <language>{language}</language>
     <script>
         {script}
     </script>
 </scripting>
-'''
-FILTER_SCRIPT = '''<filter>
+"""
+FILTER_SCRIPT = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -153,10 +153,10 @@ FILTER_SCRIPT = '''<filter>
                                 </script>
                             </scripting>
                         </script-filter>
-                 </filter>'''
+                 </filter>"""
 
 
-FILTER_IPV4 = '''<filter>
+FILTER_IPV4 = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -164,9 +164,9 @@ FILTER_IPV4 = '''<filter>
                         <ipv4-address-filter>
                              <ipv4-address>{ipv4}</ipv4-address>
                         </ipv4-address-filter>
-                 </filter>'''
+                 </filter>"""
 
-FILTER_IPV6 = '''<filter>
+FILTER_IPV6 = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -174,9 +174,9 @@ FILTER_IPV6 = '''<filter>
                         <ipv6-address-filter>
                              <ipv6-address>{ipv6}</ipv6-address>
                         </ipv6-address-filter>
-                 </filter>'''
+                 </filter>"""
 
-FILTER_RANGE_NUMBER = '''<filter>
+FILTER_RANGE_NUMBER = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -185,9 +185,9 @@ FILTER_RANGE_NUMBER = '''<filter>
                              <min-number-value>{min}</min-number-value>
                              <max-number-value>{max}</max-number-value>
                         </range-number-filter>
-                 </filter>'''
+                 </filter>"""
 
-FILTER_RANGE_STRING = '''<filter>
+FILTER_RANGE_STRING = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -196,9 +196,9 @@ FILTER_RANGE_STRING = '''<filter>
                              <min-string-value>{min}</min-string-value>
                              <max-string-value>{max}</max-string-value>
                         </range-string-filter>
-                 </filter>'''
+                 </filter>"""
 
-FILTER_SPECIFIC_NUMBER = '''<filter>
+FILTER_SPECIFIC_NUMBER = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -206,9 +206,9 @@ FILTER_SPECIFIC_NUMBER = '''<filter>
                         <specific-number-filter>
                              <specific-number>{number}</specific-number>
                         </specific-number-filter>
-                 </filter>'''
+                 </filter>"""
 
-FILTER_SPECIFIC_STRING = '''<filter>
+FILTER_SPECIFIC_STRING = """<filter>
                         <input-model>{input-model}</input-model>
                         <filter-id>1</filter-id>
                         <target-field>{target-field}</target-field>
@@ -216,9 +216,9 @@ FILTER_SPECIFIC_STRING = '''<filter>
                         <specific-string-filter>
                              <specific-string>{string}</specific-string>
                         </specific-string-filter>
-                 </filter>'''
+                 </filter>"""
 
-LINK_COMPUTATION_INSIDE = '''<link-computation xmlns="urn:opendaylight:topology:link:computation" xmlns:n="urn:opendaylight:topology:correlation">
+LINK_COMPUTATION_INSIDE = """<link-computation xmlns="urn:opendaylight:topology:link:computation" xmlns:n="urn:opendaylight:topology:correlation">
             <output-model>{output-model}</output-model>
             <node-info>
                 <node-topology>topo:1</node-topology>
@@ -228,9 +228,9 @@ LINK_COMPUTATION_INSIDE = '''<link-computation xmlns="urn:opendaylight:topology:
                 <link-topology>{underlay-topology-id}</link-topology>
                 <input-model>{input-model}</input-model>
             </link-info>
-        </link-computation>'''
+        </link-computation>"""
 
-LINK_COMPUTATION = '''<link-computation xmlns="urn:opendaylight:topology:link:computation" xmlns:n="urn:opendaylight:topology:correlation">
+LINK_COMPUTATION = """<link-computation xmlns="urn:opendaylight:topology:link:computation" xmlns:n="urn:opendaylight:topology:correlation">
             <output-model>{output-model}</output-model>
             <node-info>
                 <node-topology>topo:1</node-topology>
@@ -244,9 +244,9 @@ LINK_COMPUTATION = '''<link-computation xmlns="urn:opendaylight:topology:link:co
                 <link-topology>{underlay-topology-2-id}</link-topology>
                 <input-model>{input-model}</input-model>
             </link-info>
-        </link-computation>'''
+        </link-computation>"""
 
-NODE_ISIS = '''<node xmlns="urn:TBD:params:xml:ns:yang:network-topology"
+NODE_ISIS = """<node xmlns="urn:TBD:params:xml:ns:yang:network-topology"
             xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
             xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology"
             xmlns:ovsdb="urn:opendaylight:params:xml:ns:yang:ovsdb">
@@ -259,26 +259,26 @@ NODE_ISIS = '''<node xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                     </isis:ted>
                 </isis:isis-node-attributes>
             </igp:igp-node-attributes>
-        </node>'''
+        </node>"""
 
-NODE_OPENFLOW = '''<node xmlns="urn:opendaylight:inventory" xmlns:flov-inv="urn:opendaylight:flow:inventory">
+NODE_OPENFLOW = """<node xmlns="urn:opendaylight:inventory" xmlns:flov-inv="urn:opendaylight:flow:inventory">
         <id>{node-id}</id>
         <flov-inv:ip-address>{ip-address}</flov-inv:ip-address>
         <flov-inv:serial-number>{serial-number}</flov-inv:serial-number>
-    </node>'''
+    </node>"""
 
-TERMINATION_POINT_OVSDB = '''<termination-point xmlns="urn:TBD:params:xml:ns:yang:network-topology"
+TERMINATION_POINT_OVSDB = """<termination-point xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                                 xmlns:ovsdb="urn:opendaylight:params:xml:ns:yang:ovsdb">
                                     <tp-id>{tp-id}</tp-id>
                                     <ovsdb:ofport>{ofport}</ovsdb:ofport>
-                                </termination-point>'''
+                                </termination-point>"""
 
-NODE_CONNECTOR_OPENFLOW = '''<node-connector xmlns="urn:opendaylight:inventory" xmlns:flov-inv="urn:opendaylight:flow:inventory">
+NODE_CONNECTOR_OPENFLOW = """<node-connector xmlns="urn:opendaylight:inventory" xmlns:flov-inv="urn:opendaylight:flow:inventory">
             <id>{nc-id}</id>
             <flov-inv:port-number>{port-number}</flov-inv:port-number>
-        </node-connector>'''
+        </node-connector>"""
 
-LINK = '''<link xmlns="urn:TBD:params:xml:ns:yang:network-topology"
+LINK = """<link xmlns="urn:TBD:params:xml:ns:yang:network-topology"
                                 xmlns:igp="urn:TBD:params:xml:ns:yang:nt:l3-unicast-igp-topology"
                                 xmlns:isis="urn:TBD:params:xml:ns:yang:network:isis-topology"
                                 xmlns:ovsdb="urn:opendaylight:params:xml:ns:yang:ovsdb">
@@ -293,4 +293,4 @@ LINK = '''<link xmlns="urn:TBD:params:xml:ns:yang:network-topology"
             <igp:name>{name}</igp:name>
             <igp:metric>{metric}</igp:metric>
         </igp:igp-link-attributes>
-    </link>'''
+    </link>"""
index 2f203fd1bf78cd1aca36841e3be1e190722808e0..9b3a58fc2c06a530ad24283fcc5753c46a594ab2 100644 (file)
@@ -9,10 +9,8 @@ def get_variables():
                     "route-distinguisher": "1000:1",
                     "export-route-policy": "3000:1,4000:1",
                     "import-route-policy": "1000:1,2000:1",
-                    "apply-label": {
-                        "apply-label-per-route": "true"
-                    }
-                }
+                    "apply-label": {"apply-label-per-route": "true"},
+                },
             }
         ]
     }
@@ -22,7 +20,7 @@ def get_variables():
                 "name": "s1-eth1",
                 "type": "iana-if-type:l2vlan",
                 "odl-interface:of-port-id": "openflow:1:1",
-                "enabled": "true"
+                "enabled": "true",
             }
         ]
     }
@@ -32,31 +30,25 @@ def get_variables():
                 "odl-l3vpn:adjacency": [
                     {
                         "odl-l3vpn:ip_address": "10.0.0.1",
-                        "odl-l3vpn:mac_address": "12:f8:57:a8:b9:a1"
+                        "odl-l3vpn:mac_address": "12:f8:57:a8:b9:a1",
                     }
                 ],
                 "vpn-instance-name": "testVpn1",
-                "name": "s1-eth1"
+                "name": "s1-eth1",
             }
         ]
     }
     bgp_router = {
-        "bgp-router": {
-            "local-as-identifier": "10.10.10.10",
-            "local-as-number": 108
-        }
+        "bgp-router": {"local-as-identifier": "10.10.10.10", "local-as-number": 108}
     }
     bgp_neighbor = {
-        "bgp-neighbor": [
-            {
-                "as-number": 105,
-                "ip-address": "169.144.42.168"
-            }
-        ]
+        "bgp-neighbor": [{"as-number": 105, "ip-address": "169.144.42.168"}]
+    }
+    variables = {
+        "vpn_instance": vpn_instance,
+        "vm_interface": vm_interface,
+        "vm_vpninterface": vm_vpninterface,
+        "bgp_router": bgp_router,
+        "bgp_neighbor": bgp_neighbor,
     }
-    variables = {'vpn_instance': vpn_instance,
-                 'vm_interface': vm_interface,
-                 'vm_vpninterface': vm_vpninterface,
-                 'bgp_router': bgp_router,
-                 'bgp_neighbor': bgp_neighbor}
     return variables
index 434766e0a7b4c756712157d2f35b1420db5a613f..f5d1f7b21d8d748c377832d9a75c67923eeac6e5 100644 (file)
@@ -11,10 +11,8 @@ def get_variables(mininet1_ip, mininet2_ip):
                     "route-distinguisher": "100:1",
                     "export-route-policy": "300:1",
                     "import-route-policy": "200:1",
-                    "apply-label": {
-                        "apply-label-per-route": "true"
-                    }
-                }
+                    "apply-label": {"apply-label-per-route": "true"},
+                },
             },
             {
                 "description": "Test VPN Instance 2",
@@ -23,11 +21,9 @@ def get_variables(mininet1_ip, mininet2_ip):
                     "route-distinguisher": "400:1",
                     "export-route-policy": "500:1",
                     "import-route-policy": "600:1",
-                    "apply-label": {
-                        "apply-label-per-route": "true"
-                    }
-                }
-            }
+                    "apply-label": {"apply-label-per-route": "true"},
+                },
+            },
         ]
     }
     ietf_interfaces = {
@@ -36,25 +32,25 @@ def get_variables(mininet1_ip, mininet2_ip):
                 "name": "s1-eth1",
                 "type": "iana-if-type:l2vlan",
                 "odl-interface:of-port-id": "openflow:1:1",
-                "enabled": "true"
+                "enabled": "true",
             },
             {
                 "name": "s1-eth2",
                 "type": "iana-if-type:l2vlan",
                 "odl-interface:of-port-id": "openflow:1:2",
-                "enabled": "true"
+                "enabled": "true",
             },
             {
                 "name": "s2-eth1",
                 "type": "iana-if-type:l2vlan",
                 "odl-interface:of-port-id": "openflow:2:1",
-                "enabled": "true"
+                "enabled": "true",
             },
             {
                 "name": "s2-eth2",
                 "type": "iana-if-type:l2vlan",
                 "odl-interface:of-port-id": "openflow:2:2",
-                "enabled": "true"
+                "enabled": "true",
             },
             {
                 "enabled": "true",
@@ -64,7 +60,7 @@ def get_variables(mininet1_ip, mininet2_ip):
                 "type": "odl-interface:l3tunnel",
                 "odl-interface:tunnel-type": "odl-interface:tunnel-type-gre",
                 "odl-interface:local-ip": mininet1_ip,
-                "odl-interface:remote-ip": mininet2_ip
+                "odl-interface:remote-ip": mininet2_ip,
             },
             {
                 "enabled": "true",
@@ -74,8 +70,8 @@ def get_variables(mininet1_ip, mininet2_ip):
                 "type": "odl-interface:l3tunnel",
                 "odl-interface:tunnel-type": "odl-interface:tunnel-type-gre",
                 "odl-interface:local-ip": mininet2_ip,
-                "odl-interface:remote-ip": mininet1_ip
-            }
+                "odl-interface:remote-ip": mininet1_ip,
+            },
         ]
     }
     vpn_interfaces = {
@@ -84,48 +80,50 @@ def get_variables(mininet1_ip, mininet2_ip):
                 "odl-l3vpn:adjacency": [
                     {
                         "odl-l3vpn:ip_address": "10.0.0.1",
-                        "odl-l3vpn:mac_address": "00:00:00:00:00:01"
+                        "odl-l3vpn:mac_address": "00:00:00:00:00:01",
                     }
                 ],
                 "vpn-instance-name": "testVpn1",
-                "name": "s1-eth1"
+                "name": "s1-eth1",
             },
             {
                 "odl-l3vpn:adjacency": [
                     {
                         "odl-l3vpn:ip_address": "10.0.0.2",
-                        "odl-l3vpn:mac_address": "00:00:00:00:00:02"
+                        "odl-l3vpn:mac_address": "00:00:00:00:00:02",
                     }
                 ],
                 "vpn-instance-name": "testVpn2",
-                "name": "s1-eth2"
+                "name": "s1-eth2",
             },
             {
                 "odl-l3vpn:adjacency": [
                     {
                         "odl-l3vpn:ip_address": "10.0.0.3",
-                        "odl-l3vpn:mac_address": "00:00:00:00:00:03"
+                        "odl-l3vpn:mac_address": "00:00:00:00:00:03",
                     }
                 ],
                 "vpn-instance-name": "testVpn1",
-                "name": "s2-eth1"
+                "name": "s2-eth1",
             },
             {
                 "odl-l3vpn:adjacency": [
                     {
                         "odl-l3vpn:ip_address": "10.0.0.4",
-                        "odl-l3vpn:mac_address": "00:00:00:00:00:04"
+                        "odl-l3vpn:mac_address": "00:00:00:00:00:04",
                     }
                 ],
                 "vpn-instance-name": "testVpn2",
-                "name": "s2-eth2"
-            }
+                "name": "s2-eth2",
+            },
         ]
     }
     vpn_inst_data = json.dumps(vpn_instances)
     ietf_int_data = json.dumps(ietf_interfaces)
     vpn_int_data = json.dumps(vpn_interfaces)
-    variables = {'vpn_instances': vpn_inst_data,
-                 'ietf_interfaces': ietf_int_data,
-                 'vpn_interfaces': vpn_int_data}
+    variables = {
+        "vpn_instances": vpn_inst_data,
+        "ietf_interfaces": ietf_int_data,
+        "vpn_interfaces": vpn_int_data,
+    }
     return variables
index e6b55e0b57ad240e7f248676a823135a43ec5801..496cb69e5cd1c8f65e88ac02727114646da62048 100644 (file)
@@ -1,8 +1,8 @@
 vtn_data_models = [
-    'config/network-topology:network-topology/topology/ovsdb:1',
-    'config/opendaylight-inventory:nodes',
-    'operational/opendaylight-inventory:nodes',
-    'operational/entity-owners:entity-owners',
-    'operational/vtn-inventory:vtn-nodes',
-    'operational/network-topology:network-topology/topology/ovsdb:1'
+    "config/network-topology:network-topology/topology/ovsdb:1",
+    "config/opendaylight-inventory:nodes",
+    "operational/opendaylight-inventory:nodes",
+    "operational/entity-owners:entity-owners",
+    "operational/vtn-inventory:vtn-nodes",
+    "operational/network-topology:network-topology/topology/ovsdb:1",
 ]
index 379b33bc72b761e93cc33aaa29a7dc30c11da477..2fe514c2219889c69119f46bdd361d0fce8c80a7 100644 (file)
@@ -14,16 +14,20 @@ import platform
 from docs_conf.conf import *
 
 # Append to intersphinx_mapping
-intersphinx_mapping['odl-releng-builder'] = ('http://docs.opendaylight.org/projects/releng-builder/en/latest/', None)
+intersphinx_mapping["odl-releng-builder"] = (
+    "http://docs.opendaylight.org/projects/releng-builder/en/latest/",
+    None,
+)
 
 linkcheck_ignore = [
     # Ignore jenkins because it's often slow to respond.
-    'https://jenkins.opendaylight.org/releng',
-    'https://jenkins.opendaylight.org/sandbox',
+    "https://jenkins.opendaylight.org/releng",
+    "https://jenkins.opendaylight.org/sandbox",
 ]
 
 nitpicky = True
 
-if platform.system() != 'Windows':
+if platform.system() != "Windows":
     import subprocess
+
     subprocess.call(["./build-integration-robot-libdoc.sh"])
index 92678d14b12ad71cf24189da6497add1740bcbad..cbceca4acf3cdb5e8996b482e55a2d98ec58dc18 100644 (file)
@@ -8,18 +8,15 @@ class Transaction:
     def __init__(self, txnId, startTime, operations):
         self.txnId = txnId
         self.operations = operations
-        self.startTime = datetime.strptime(startTime,
-                                           '%Y-%m-%d,%H:%M:%S,%f')
+        self.startTime = datetime.strptime(startTime, "%Y-%m-%d,%H:%M:%S,%f")
         self.reachedTime = None
         self.completeTime = None
 
     def setReachedTime(self, reachedTime):
-        self.reachedTime = datetime.strptime(reachedTime,
-                                             '%Y-%m-%d,%H:%M:%S,%f')
+        self.reachedTime = datetime.strptime(reachedTime, "%Y-%m-%d,%H:%M:%S,%f")
 
     def setCompleteTime(self, completeTime):
-        self.completeTime = datetime.strptime(completeTime,
-                                              '%Y-%m-%d,%H:%M:%S,%f')
+        self.completeTime = datetime.strptime(completeTime, "%Y-%m-%d,%H:%M:%S,%f")
 
     def totalTime(self):
         return Transaction.diffInMicros(self.startTime, self.completeTime)
@@ -37,25 +34,45 @@ class Transaction:
         return -1
 
     def __str__(self):
-        return "transactionId = " + self.txnId + ", " \
-               + "operations = " + unicode(self.operations) + ", " \
-               + "startTime = " + unicode(self.startTime) + ", " \
-               + "reachedTime = " + unicode(self.reachedTime) + ", " \
-               + "completeTime = " + unicode(self.completeTime) + ", " \
-               + "transferTime = " + unicode(self.transferTime()) + ", " \
-               + "totalTime = " + unicode(self.totalTime())
+        return (
+            "transactionId = "
+            + self.txnId
+            + ", "
+            + "operations = "
+            + unicode(self.operations)
+            + ", "
+            + "startTime = "
+            + unicode(self.startTime)
+            + ", "
+            + "reachedTime = "
+            + unicode(self.reachedTime)
+            + ", "
+            + "completeTime = "
+            + unicode(self.completeTime)
+            + ", "
+            + "transferTime = "
+            + unicode(self.transferTime())
+            + ", "
+            + "totalTime = "
+            + unicode(self.totalTime())
+        )
 
     def csv(self):
-        return unicode(self.startTime) + "," \
-            + self.txnId + "," \
-            + unicode(self.operations) + "," \
-            + unicode(self.transferTime()) + "," \
+        return (
+            unicode(self.startTime)
+            + ","
+            + self.txnId
+            + ","
+            + unicode(self.operations)
+            + ","
+            + unicode(self.transferTime())
+            + ","
             + unicode(self.totalTime())
+        )
 
     @staticmethod
     def csv_header():
-        return "Start Time,Transaction Id,Operations,Transfer Time," \
-               "Complete Time"
+        return "Start Time,Transaction Id,Operations,Transfer Time," "Complete Time"
 
 
 def processFiles():
@@ -63,9 +80,9 @@ def processFiles():
     txnBegin = open("txnbegin.txt", "r")
     for line in txnBegin:
         arr = line.split(",")
-        txns[arr[3]] = Transaction(arr[3],
-                                   arr[0] + "," + arr[1] + "," + arr[2],
-                                   int(arr[4]))
+        txns[arr[3]] = Transaction(
+            arr[3], arr[0] + "," + arr[1] + "," + arr[2], int(arr[4])
+        )
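+    # Each txnbegin.txt line is assumed to look like (hypothetical values):
+    #   2020-03-27,19:11:25,123456,txn-1,5
+    # i.e. date,time,microseconds,txnId,operations per the strptime format above.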
 
     txnReached = open("txnreached.txt", "r")
     for line in txnReached:
index 401dcb630c9fd20cc3970c92a9fad432a39a0504..62cb38ab429ba95f4ff0c5ba6080522386a5b9d2 100755 (executable)
@@ -42,33 +42,55 @@ import random
 import re
 from remote_host import RemoteHost
 
-parser = argparse.ArgumentParser(description='Cluster Deployer')
-parser.add_argument("--distribution", default="",
-                    help="the absolute path of the distribution on the local "
-                         "host that needs to be deployed. (Must contain "
-                         "version in the form: \"<#>.<#>.<#>-<name>\", e.g. "
-                         "0.2.0-SNAPSHOT)",
-                    required=True)
-parser.add_argument("--rootdir", default="/root",
-                    help="the root directory on the remote host where the "
-                         "distribution is to be deployed",
-                    required=True)
-parser.add_argument("--hosts", default="", help="a comma separated list of "
-                                                "host names or ip addresses",
-                    required=True)
-parser.add_argument("--clean", action="store_true", default=False,
-                    help="clean the deployment on the remote host")
-parser.add_argument("--template", default="openflow",
-                    help="the name of the template to be used. "
-                    "This name should match a folder in the templates "
-                         "directory.")
-parser.add_argument("--rf", default=3, type=int,
-                    help="replication factor. This is the number of replicas "
-                         "that should be created for each shard.")
-parser.add_argument("--user", default="root", help="the SSH username for the "
-                                                   "remote host(s)")
-parser.add_argument("--password", default="Ecp123",
-                    help="the SSH password for the remote host(s)")
+parser = argparse.ArgumentParser(description="Cluster Deployer")
+parser.add_argument(
+    "--distribution",
+    default="",
+    help="the absolute path of the distribution on the local "
+    "host that needs to be deployed. (Must contain "
+    'version in the form: "<#>.<#>.<#>-<name>", e.g. '
+    "0.2.0-SNAPSHOT)",
+    required=True,
+)
+parser.add_argument(
+    "--rootdir",
+    default="/root",
+    help="the root directory on the remote host where the "
+    "distribution is to be deployed",
+    required=True,
+)
+parser.add_argument(
+    "--hosts",
+    default="",
+    help="a comma separated list of " "host names or ip addresses",
+    required=True,
+)
+parser.add_argument(
+    "--clean",
+    action="store_true",
+    default=False,
+    help="clean the deployment on the remote host",
+)
+parser.add_argument(
+    "--template",
+    default="openflow",
+    help="the name of the template to be used. "
+    "This name should match a folder in the templates "
+    "directory.",
+)
+parser.add_argument(
+    "--rf",
+    default=3,
+    type=int,
+    help="replication factor. This is the number of replicas "
+    "that should be created for each shard.",
+)
+parser.add_argument(
+    "--user", default="root", help="the SSH username for the " "remote host(s)"
+)
+parser.add_argument(
+    "--password", default="Ecp123", help="the SSH password for the remote host(s)"
+)
 args = parser.parse_args()
 
 
@@ -118,9 +140,22 @@ def array_str(arr):
 # The Deployer deploys the controller to one host and configures it
 #
 class Deployer:
-    def __init__(self, host, member_no, template, user, password, rootdir,
-                 distribution, dir_name, hosts, ds_seed_nodes, rpc_seed_nodes,
-                 replicas, clean=False):
+    def __init__(
+        self,
+        host,
+        member_no,
+        template,
+        user,
+        password,
+        rootdir,
+        distribution,
+        dir_name,
+        hosts,
+        ds_seed_nodes,
+        rpc_seed_nodes,
+        replicas,
+        clean=False,
+    ):
         self.host = host
         self.member_no = member_no
         self.template = template
@@ -136,8 +171,7 @@ class Deployer:
         self.replicas = replicas
 
         # Connect to the remote host and start doing operations
-        self.remote = RemoteHost(self.host, self.user, self.password,
-                                 self.rootdir)
+        self.remote = RemoteHost(self.host, self.user, self.password, self.rootdir)
 
     def kill_controller(self):
         self.remote.copy_file("kill_controller.sh", self.rootdir + "/")
@@ -145,52 +179,58 @@ class Deployer:
 
     def deploy(self):
         # Determine distribution version
-        distribution_name \
-            = os.path.splitext(os.path.basename(self.distribution))[0]
-        distribution_ver = re.search('(\d+\.\d+\.\d+-\w+\Z)|'
-                                     '(\d+\.\d+\.\d+-\w+)(-RC\d+\Z)|'
-                                     '(\d+\.\d+\.\d+-\w+)(-RC\d+(\.\d+)\Z)|'
-                                     '(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|'
-                                     '(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)',
-                                     distribution_name)  # noqa
+        distribution_name = os.path.splitext(os.path.basename(self.distribution))[0]
+        distribution_ver = re.search(
+            "(\d+\.\d+\.\d+-\w+\Z)|"
+            "(\d+\.\d+\.\d+-\w+)(-RC\d+\Z)|"
+            "(\d+\.\d+\.\d+-\w+)(-RC\d+(\.\d+)\Z)|"
+            "(\d+\.\d+\.\d+-\w+)(-SR\d+\Z)|"
+            "(\d+\.\d+\.\d+-\w+)(-SR\d+(\.\d+)\Z)",
+            distribution_name,
+        )  # noqa
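+        # Illustrative matches: "karaf-0.2.0-SNAPSHOT" -> "0.2.0-SNAPSHOT",
+        # "karaf-0.4.2-Beryllium-SR2" -> "0.4.2-Beryllium-SR2".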
 
         if distribution_ver is None:
-            print("%s is not a valid distribution version."
-                  " (Must contain version in the form: "
-                  "\"<#>.<#>.<#>-<name>\" or \"<#>.<#>."
-                  "<#>-<name>-SR<#>\" or \"<#>.<#>.<#>"
-                  "-<name>-RC<#>\", e.g. 0.2.0-SNAPSHOT)" % distribution_name)
+            print(
+                "%s is not a valid distribution version."
+                " (Must contain version in the form: "
+                '"<#>.<#>.<#>-<name>" or "<#>.<#>.'
+                '<#>-<name>-SR<#>" or "<#>.<#>.<#>'
+                '-<name>-RC<#>", e.g. 0.2.0-SNAPSHOT)' % distribution_name
+            )
             sys.exit(1)
         distribution_ver = distribution_ver.group()
 
         # Render all the templates
         renderer = TemplateRenderer(self.template)
         akka_conf = renderer.render(
-            "akka.conf.template", "akka.conf",
+            "akka.conf.template",
+            "akka.conf",
             {
                 "HOST": self.host,
                 "MEMBER_NAME": "member-" + str(self.member_no),
                 "DS_SEED_NODES": array_str(self.ds_seed_nodes),
-                "RPC_SEED_NODES": array_str(self.rpc_seed_nodes)
-            })
-        module_shards_conf = renderer.render("module-shards.conf.template",
-                                             "module-shards.conf",
-                                             self.replicas)
-        modules_conf = renderer.render("modules.conf.template",
-                                       "modules.conf")
-        features_cfg = \
-            renderer.render("org.apache.karaf.features.cfg.template",
-                            "org.apache.karaf.features.cfg",
-                            {"ODL_DISTRIBUTION": distribution_ver})
+                "RPC_SEED_NODES": array_str(self.rpc_seed_nodes),
+            },
+        )
+        module_shards_conf = renderer.render(
+            "module-shards.conf.template", "module-shards.conf", self.replicas
+        )
+        modules_conf = renderer.render("modules.conf.template", "modules.conf")
+        features_cfg = renderer.render(
+            "org.apache.karaf.features.cfg.template",
+            "org.apache.karaf.features.cfg",
+            {"ODL_DISTRIBUTION": distribution_ver},
+        )
         jolokia_xml = renderer.render("jolokia.xml.template", "jolokia.xml")
-        management_cfg = \
-            renderer.render("org.apache.karaf.management.cfg.template",
-                            "org.apache.karaf.management.cfg",
-                            {"HOST": self.host})
-        datastore_cfg = \
-            renderer.render(
-                "org.opendaylight.controller.cluster.datastore.cfg.template",
-                "org.opendaylight.controller.cluster.datastore.cfg")
+        management_cfg = renderer.render(
+            "org.apache.karaf.management.cfg.template",
+            "org.apache.karaf.management.cfg",
+            {"HOST": self.host},
+        )
+        datastore_cfg = renderer.render(
+            "org.opendaylight.controller.cluster.datastore.cfg.template",
+            "org.opendaylight.controller.cluster.datastore.cfg",
+        )
 
         # Delete all the sub-directories under the deploy directory if
         # the --clean flag is used
@@ -206,35 +246,39 @@ class Deployer:
         # Copy the distribution to the host and unzip it
         odl_file_path = self.dir_name + "/odl.zip"
         self.remote.copy_file(self.distribution, odl_file_path)
-        self.remote.exec_cmd("unzip -o " + odl_file_path + " -d "
-                             + self.dir_name + "/")
+        self.remote.exec_cmd("unzip -o " + odl_file_path + " -d " + self.dir_name + "/")
 
         # Rename the distribution directory to odl
-        self.remote.exec_cmd("mv " + self.dir_name + "/"
-                             + distribution_name + " " + self.dir_name + "/odl")
+        self.remote.exec_cmd(
+            "mv "
+            + self.dir_name
+            + "/"
+            + distribution_name
+            + " "
+            + self.dir_name
+            + "/odl"
+        )
 
         # Copy all the generated files to the server
-        self.remote.mkdir(self.dir_name
-                          + "/odl/configuration/initial")
-        self.remote.copy_file(akka_conf, self.dir_name
-                              + "/odl/configuration/initial/")
-        self.remote.copy_file(module_shards_conf, self.dir_name
-                              + "/odl/configuration/initial/")
-        self.remote.copy_file(modules_conf, self.dir_name
-                              + "/odl/configuration/initial/")
-        self.remote.copy_file(features_cfg, self.dir_name
-                              + "/odl/etc/")
-        self.remote.copy_file(jolokia_xml, self.dir_name
-                              + "/odl/deploy/")
-        self.remote.copy_file(management_cfg, self.dir_name
-                              + "/odl/etc/")
+        self.remote.mkdir(self.dir_name + "/odl/configuration/initial")
+        self.remote.copy_file(akka_conf, self.dir_name + "/odl/configuration/initial/")
+        self.remote.copy_file(
+            module_shards_conf, self.dir_name + "/odl/configuration/initial/"
+        )
+        self.remote.copy_file(
+            modules_conf, self.dir_name + "/odl/configuration/initial/"
+        )
+        self.remote.copy_file(features_cfg, self.dir_name + "/odl/etc/")
+        self.remote.copy_file(jolokia_xml, self.dir_name + "/odl/deploy/")
+        self.remote.copy_file(management_cfg, self.dir_name + "/odl/etc/")
 
         if datastore_cfg is not None:
             self.remote.copy_file(datastore_cfg, self.dir_name + "/odl/etc/")
 
         # Add symlink
-        self.remote.exec_cmd("ln -sfn " + self.dir_name + " "
-                             + args.rootdir + "/deploy/current")
+        self.remote.exec_cmd(
+            "ln -sfn " + self.dir_name + " " + args.rootdir + "/deploy/current"
+        )
 
         # Run karaf
         self.remote.start_controller(self.dir_name)
@@ -260,27 +304,40 @@ def main():
     replicas = {}
 
     for x in range(0, len(hosts)):
-        ds_seed_nodes.append("akka.tcp://opendaylight-cluster-data@"
-                             + hosts[x] + ":2550")
-        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@"
-                              + hosts[x] + ":2551")
+        ds_seed_nodes.append(
+            "akka.tcp://opendaylight-cluster-data@" + hosts[x] + ":2550"
+        )
+        rpc_seed_nodes.append("akka.tcp://odl-cluster-rpc@" + hosts[x] + ":2551")
         all_replicas.append("member-" + str(x + 1))
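+        # e.g. a host "10.0.0.1" (hypothetical) yields the seed nodes
+        # "akka.tcp://opendaylight-cluster-data@10.0.0.1:2550" and
+        # "akka.tcp://odl-cluster-rpc@10.0.0.1:2551".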
 
     for x in range(0, 10):
         if len(all_replicas) > args.rf:
-            replicas["REPLICAS_" + str(x + 1)] \
-                = array_str(random.sample(all_replicas, args.rf))
+            replicas["REPLICAS_" + str(x + 1)] = array_str(
+                random.sample(all_replicas, args.rf)
+            )
         else:
             replicas["REPLICAS_" + str(x + 1)] = array_str(all_replicas)
 
     deployers = []
 
     for x in range(0, len(hosts)):
-        deployers.append(Deployer(hosts[x], x + 1, args.template, args.user,
-                                  args.password, args.rootdir,
-                                  args.distribution, dir_name, hosts,
-                                  ds_seed_nodes, rpc_seed_nodes, replicas,
-                                  args.clean))
+        deployers.append(
+            Deployer(
+                hosts[x],
+                x + 1,
+                args.template,
+                args.user,
+                args.password,
+                args.rootdir,
+                args.distribution,
+                dir_name,
+                hosts,
+                ds_seed_nodes,
+                rpc_seed_nodes,
+                replicas,
+                args.clean,
+            )
+        )
 
     for x in range(0, len(hosts)):
         deployers[x].kill_controller()
index c85df36f866a999ecc2b96798804999baad84072..1a5a9aaaad45e2cc5561ca5f3c6dac1714683504 100644 (file)
@@ -26,9 +26,12 @@ class RemoteHost:
         print("Executing command %s on host %s" % (command, self.host))
         rc = self.lib.execute_command(command, return_rc=True)
         if rc[1] != 0:
-            raise Exception('remote command failed [{0}] with exit code {1}.'
-                            'For linux-based vms, Please make sure requiretty is disabled in the /etc/sudoers file'
-                            .format(command, rc))
+            raise Exception(
+                "remote command failed [{0}] with exit code {1}."
+                "For linux-based vms, Please make sure requiretty is disabled in the /etc/sudoers file".format(
+                    command, rc
+                )
+            )
 
     def mkdir(self, dir_name):
         self.exec_cmd("mkdir -p " + dir_name)
@@ -46,8 +49,10 @@ class RemoteHost:
         self.lib.put_file(src, dest)
 
     def kill_controller(self):
-        self.exec_cmd("sudo ps axf | grep karaf | grep -v grep "
-                      "| awk '{print \"kill -9 \" $1}' | sudo sh")
+        self.exec_cmd(
+            "sudo ps axf | grep karaf | grep -v grep "
+            "| awk '{print \"kill -9 \" $1}' | sudo sh"
+        )
 
     def start_controller(self, dir_name):
         self.exec_cmd(dir_name + "/odl/bin/start")
index 044e03ee8b410744e8ef39288468d80c94086e49..cf166f245777816c42738edc2cb17a0d20786c41 100755 (executable)
 import argparse
 from remote_host import RemoteHost
 
-parser = argparse.ArgumentParser(description='Cluster Restart')
-parser.add_argument("--rootdir", default="/root",
-                    help="the root directory on the remote host where the distribution is deployed", required=True)
-parser.add_argument("--hosts", default="",
-                    help="a comma separated list of host names or ip addresses", required=True)
-parser.add_argument("--clean", action="store_true", default=False,
-                    help="clean the persistent data for the current deployment")
-parser.add_argument("--user", default="root", help="the SSH username for the remote host(s)")
-parser.add_argument("--password", default="Ecp123", help="the SSH password for the remote host(s)")
+parser = argparse.ArgumentParser(description="Cluster Restart")
+parser.add_argument(
+    "--rootdir",
+    default="/root",
+    help="the root directory on the remote host where the distribution is deployed",
+    required=True,
+)
+parser.add_argument(
+    "--hosts",
+    default="",
+    help="a comma separated list of host names or ip addresses",
+    required=True,
+)
+parser.add_argument(
+    "--clean",
+    action="store_true",
+    default=False,
+    help="clean the persistent data for the current deployment",
+)
+parser.add_argument(
+    "--user", default="root", help="the SSH username for the remote host(s)"
+)
+parser.add_argument(
+    "--password", default="Ecp123", help="the SSH password for the remote host(s)"
+)
 args = parser.parse_args()
 
 
@@ -29,7 +45,7 @@ def main():
         # Connect to the remote host and start doing operations
         remote = RemoteHost(hosts[x], args.user, args.password, args.rootdir)
         remote.kill_controller()
-        if(args.clean):
+        if args.clean:
             remote.exec_cmd("rm -rf " + args.rootdir + "/deploy/current/odl/*journal")
             remote.exec_cmd("rm -rf " + args.rootdir + "/deploy/current/odl/snapshots")
         remote.start_controller(args.rootdir + "/deploy/current")
index b4848091d5110ec83f3638b090b7b00352420c57..07f084635502c4ca4486a6fe07d4a6f5b7a1f447 100755 (executable)
@@ -20,8 +20,8 @@ RESP_GET_SUCCESS = 200
 RESP_NOT_FOUND = 404
 
 
-con_header = {'Accept': 'application/json', 'content-type': 'application/json'}
-authentication = ('admin', 'admin')
+con_header = {"Accept": "application/json", "content-type": "application/json"}
+authentication = ("admin", "admin")
 
 
 def validate_cluster(ipaddress):
@@ -46,9 +46,9 @@ def validate_cluster(ipaddress):
         sys.exit(1)
 
     data = json.loads(resp.content)
-    cluster_status = data['value']['ClusterStatus']
+    cluster_status = data["value"]["ClusterStatus"]
     status = json.loads(cluster_status)
-    members = status['members']
+    members = status["members"]
     member_list = []
     entity_owner_list = []
 
@@ -56,26 +56,27 @@ def validate_cluster(ipaddress):
         # splitting the ip address of the node from the json object
         # sample json data
         # "akka.tcp://opendaylight-cluster-data@10.106.138.137:2550"
-        ip = re.search('@(.+?):', member['address']).group(1)
-        node_status = ip + "-" + member['status']
+        ip = re.search("@(.+?):", member["address"]).group(1)
+        node_status = ip + "-" + member["status"]
         member_list.append(node_status)
-        url1 = "http://" + ip +\
-               ":8181/jolokia/read/org.opendaylight.controller:"\
-               "Category=ShardManager,name=shard-manager-operational,"\
-               "type=DistributedOperationalDatastore"
+        url1 = (
+            "http://" + ip + ":8181/jolokia/read/org.opendaylight.controller:"
+            "Category=ShardManager,name=shard-manager-operational,"
+            "type=DistributedOperationalDatastore"
+        )
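+        # query the operational ShardManager MBean over Jolokia to learn this member's role name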
         resp1 = requests.get(url1, headers=con_header, auth=authentication)
         if resp1.status_code != RESP_GET_SUCCESS:
             print("error in getting response for the node", ip)
             print("response content", resp1.content)
             continue
         data2 = json.loads(resp1.content)
-        member_role = data2['value']['MemberName']
+        member_role = data2["value"]["MemberName"]
         entity_owner_list.append(ip + ":" + member_role)
-    leader = data['value']['Leader']
+    leader = data["value"]["Leader"]
 
-    leaderNode = leader[leader.index('@') + 1:leader.rindex(':')]
+    leaderNode = leader[leader.index("@") + 1 : leader.rindex(":")]
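+    # e.g. "akka.tcp://opendaylight-cluster-data@10.106.138.137:2550" -> "10.106.138.137"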
     for leader_node in member_list:
-        address = leader_node.split('-')
+        address = leader_node.split("-")
         if address[0] == leaderNode:
             print("=================== Leader Node ======================\n")
             print(leader_node)
@@ -99,25 +100,25 @@ def list_entity_owners(ipaddress, entity_owner_list):
         print("response content", resp.content)
         sys.exit(1)
     data = json.loads(resp.content)
-    ovsdb = data['entity-owners']['entity-type']
+    ovsdb = data["entity-owners"]["entity-type"]
     print("\n\n=================== Entity Details ===================\n")
     for e_type in ovsdb:
-        entities = e_type['entity']
+        entities = e_type["entity"]
         for entity in entities:
-            id = entity['id']
-            if len(entity['owner']) > 0:
-                print("NODE ID", str(id[id.rindex('=') + 2:len(id) - 2]))
-                print("OWNER", str(entity['owner']))
+            id = entity["id"]
+            if len(entity["owner"]) > 0:
+                print("NODE ID", str(id[id.rindex("=") + 2 : len(id) - 2]))
+                print("OWNER", str(entity["owner"]))
             for owner in entity_owner_list:
-                owner_role = owner.split(':')
-                if entity['owner'] == owner_role[1]:
+                owner_role = owner.split(":")
+                if entity["owner"] == owner_role[1]:
                     print("IP Address", str(owner_role[0]))
                     print("\n")
 
 
 # Main Block
-if __name__ == '__main__':
-    print('*****Cluster Status******')
+if __name__ == "__main__":
+    print("*****Cluster Status******")
     ipaddress = raw_input("Please enter ipaddress to find Leader Node : ")
     validate_cluster(ipaddress)
 
index 297e4f3adcf2aa49bd2927ef1e9755d8b78b6432..4deca8d3f0b08cca1d36263a5f28040ac50f4252 100644 (file)
@@ -42,7 +42,9 @@ limitations under the License."""
 
 logger = logging.getLogger("changes")
 logger.setLevel(logging.DEBUG)
-formatter = logging.Formatter('%(asctime)s - %(levelname).4s - %(name)s - %(lineno)04d - %(message)s')
+formatter = logging.Formatter(
+    "%(asctime)s - %(levelname).4s - %(name)s - %(lineno)04d - %(message)s"
+)
 ch = logging.StreamHandler()
 ch.setLevel(logging.INFO)
 ch.setFormatter(formatter)
@@ -63,8 +65,22 @@ class Changes(object):
     # NETVIRT_PROJECTS, as taken from autorelease dependency info [0]
     # TODO: it would be nice to fetch the dependency info on the fly in case it changes down the road
     # [0] https://logs.opendaylight.org/releng/jenkins092/autorelease-release-carbon/127/archives/dependencies.log.gz
-    NETVIRT_PROJECTS = ["netvirt", "controller", "dlux", "dluxapps", "genius", "infrautils", "mdsal", "netconf",
-                        "neutron", "odlparent", "openflowplugin", "ovsdb", "sfc", "yangtools"]
+    NETVIRT_PROJECTS = [
+        "netvirt",
+        "controller",
+        "dlux",
+        "dluxapps",
+        "genius",
+        "infrautils",
+        "mdsal",
+        "netconf",
+        "neutron",
+        "odlparent",
+        "openflowplugin",
+        "ovsdb",
+        "sfc",
+        "yangtools",
+    ]
     PROJECT_NAMES = NETVIRT_PROJECTS
     VERBOSE = logging.INFO
     DISTRO_PATH = "/tmp/distribution-karaf"
@@ -88,10 +104,16 @@ class Changes(object):
     regex_shortmsg = None
     regex_longmsg = None
 
-    def __init__(self, branch=BRANCH, distro_path=DISTRO_PATH,
-                 limit=LIMIT, qlimit=QUERY_LIMIT,
-                 project_names=PROJECT_NAMES, remote_url=REMOTE_URL,
-                 verbose=VERBOSE):
+    def __init__(
+        self,
+        branch=BRANCH,
+        distro_path=DISTRO_PATH,
+        limit=LIMIT,
+        qlimit=QUERY_LIMIT,
+        project_names=PROJECT_NAMES,
+        remote_url=REMOTE_URL,
+        verbose=VERBOSE,
+    ):
         self.branch = branch
         self.distro_path = distro_path
         self.limit = limit
@@ -101,12 +123,14 @@ class Changes(object):
         self.verbose = verbose
         self.projects = {}
         self.set_log_level(verbose)
-        self.regex_changeid = re.compile(r'(Change-Id.*: (\bI[a-f0-9]{40})\b|\bI([a-f0-9]{8})\b)')
+        self.regex_changeid = re.compile(
+            r"(Change-Id.*: (\bI[a-f0-9]{40})\b|\bI([a-f0-9]{8})\b)"
+        )
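+        # matches a full "Change-Id: I<40 hex chars>" line or a bare 8-hex short id like "Iabcdef12"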
         # self.regex_shortmsg = re.compile(r'"([^"]*)"|(git.commit.message.short=(.*))')
         self.regex_shortmsg1 = re.compile(r'(git.commit.message.short=.*"([^"]*)")')
-        self.regex_shortmsg2 = re.compile(r'(git.commit.message.short=(.*))')
-        self.regex_longmsg = re.compile(r'git.commit.message.full=(.*)')
-        self.regex_commitid = re.compile(r'(git.commit.id=(.*))')
+        self.regex_shortmsg2 = re.compile(r"(git.commit.message.short=(.*))")
+        self.regex_longmsg = re.compile(r"git.commit.message.full=(.*)")
+        self.regex_commitid = re.compile(r"(git.commit.id=(.*))")
 
     @staticmethod
     def set_log_level(level):
@@ -121,18 +145,30 @@ class Changes(object):
         if project:
             print("%s" % project)
         print("i  grantedOn           lastUpdatd          chang subject")
-        print("-- ------------------- ------------------- ----- -----------------------------------------")
+        print(
+            "-- ------------------- ------------------- ----- -----------------------------------------"
+        )
         if gerrits is None:
             print("gerrit is under review")
             return
         for i, gerrit in enumerate(gerrits):
             if isinstance(gerrit, dict):
-                print("%02d %19s %19s %5s %s"
-                      % (i,
-                         self.epoch_to_utc(gerrit["grantedOn"]) if "grantedOn" in gerrit else 0,
-                         self.epoch_to_utc(gerrit["lastUpdated"]) if "lastUpdated" in gerrit else 0,
-                         gerrit["number"] if "number" in gerrit else "00000",
-                         gerrit["subject"].encode('ascii', 'replace') if "subject" in gerrit else "none"))
+                print(
+                    "%02d %19s %19s %5s %s"
+                    % (
+                        i,
+                        self.epoch_to_utc(gerrit["grantedOn"])
+                        if "grantedOn" in gerrit
+                        else 0,
+                        self.epoch_to_utc(gerrit["lastUpdated"])
+                        if "lastUpdated" in gerrit
+                        else 0,
+                        gerrit["number"] if "number" in gerrit else "00000",
+                        gerrit["subject"].encode("ascii", "replace")
+                        if "subject" in gerrit
+                        else "none",
+                    )
+                )
 
     def pretty_print_projects(self, projects):
         print("========================================")
@@ -151,35 +187,45 @@ class Changes(object):
         """
         Download the distribution from self.distro_url and extract it to self.distro_path
         """
-        logger.info("attempting to download distribution from %s and extract to %s", self.distro_url, self.distro_path)
+        logger.info(
+            "attempting to download distribution from %s and extract to %s",
+            self.distro_url,
+            self.distro_path,
+        )
 
-        tmp_distro_zip = '/tmp/distro.zip'
-        tmp_unzipped_location = '/tmp/distro_unzipped'
-        downloader = urllib3.PoolManager(cert_reqs='CERT_NONE')
+        tmp_distro_zip = "/tmp/distro.zip"
+        tmp_unzipped_location = "/tmp/distro_unzipped"
+        downloader = urllib3.PoolManager(cert_reqs="CERT_NONE")
 
         # disabling warnings to prevent scaring the user with InsecureRequestWarning
         urllib3.disable_warnings()
 
-        downloaded_distro = downloader.request('GET', self.distro_url)
-        with open(tmp_distro_zip, 'wb') as f:
+        downloaded_distro = downloader.request("GET", self.distro_url)
+        with open(tmp_distro_zip, "wb") as f:
             f.write(downloaded_distro.data)
 
         downloaded_distro.release_conn()
 
         # after the .zip is extracted we want to rename it to be the distro_path which may have
         # been given by the user
-        distro_zip = zipfile.ZipFile(tmp_distro_zip, 'r')
+        distro_zip = zipfile.ZipFile(tmp_distro_zip, "r")
         distro_zip.extractall(tmp_unzipped_location)
         unzipped_distro_folder = os.listdir(tmp_unzipped_location)
 
         # if the distro_path already exists, we won't overwrite it and just continue hoping what's
         # there is relevant (and maybe already put there by this tool earlier)
         try:
-            os.rename(tmp_unzipped_location + "/" + unzipped_distro_folder[0], self.distro_path)
+            os.rename(
+                tmp_unzipped_location + "/" + unzipped_distro_folder[0],
+                self.distro_path,
+            )
         except OSError as e:
             logger.warn(e)
-            logger.warn("Unable to move extracted files from %s to %s. Using whatever bits are already there",
-                        tmp_unzipped_location, self.distro_path)
+            logger.warn(
+                "Unable to move extracted files from %s to %s. Using whatever bits are already there",
+                tmp_unzipped_location,
+                self.distro_path,
+            )
 
     def get_includes(self, project, changeid=None, msg=None, merged=True):
         """
@@ -192,14 +238,25 @@ class Changes(object):
         :return list: includes[0] is the gerrit requested, [1 to limit] are the gerrits found.
         """
         if merged:
-            includes = self.gerritquery.get_gerrits(project, changeid, 1, msg, status="merged")
+            includes = self.gerritquery.get_gerrits(
+                project, changeid, 1, msg, status="merged"
+            )
         else:
-            includes = self.gerritquery.get_gerrits(project, changeid, 1, None, None, True)
+            includes = self.gerritquery.get_gerrits(
+                project, changeid, 1, None, None, True
+            )
         if not includes:
-            logger.info("Review %s in %s:%s was not found", changeid, project, self.gerritquery.branch)
+            logger.info(
+                "Review %s in %s:%s was not found",
+                changeid,
+                project,
+                self.gerritquery.branch,
+            )
             return None
 
-        gerrits = self.gerritquery.get_gerrits(project, changeid=None, limit=self.qlimit, msg=msg, status="merged")
+        gerrits = self.gerritquery.get_gerrits(
+            project, changeid=None, limit=self.qlimit, msg=msg, status="merged"
+        )
         for gerrit in gerrits:
             # don"t include the same change in the list
             if gerrit["id"] == changeid:
@@ -214,7 +271,11 @@ class Changes(object):
                 break
 
         if len(includes) != self.limit + 1:
-            logger.info("%s query limit was not large enough to capture %d gerrits", project, self.limit)
+            logger.info(
+                "%s query limit was not large enough to capture %d gerrits",
+                project,
+                self.limit,
+            )
 
         return includes
 
@@ -256,95 +317,160 @@ class Changes(object):
         # match a 40 or 8 char Change-Id hash. both start with I
         changeid = self.regex_changeid.search(pfile)
         if changeid and changeid.group(2):
-            logger.info("trying Change-Id from git.properties as merged in %s: %s", project, changeid.group(2))
-
-            gerrits = self.gerritquery.get_gerrits(project, changeid.group(2), 1, None, status="merged")
+            logger.info(
+                "trying Change-Id from git.properties as merged in %s: %s",
+                project,
+                changeid.group(2),
+            )
+
+            gerrits = self.gerritquery.get_gerrits(
+                project, changeid.group(2), 1, None, status="merged"
+            )
             if gerrits:
-                logger.info("found Change-Id from git.properties as merged in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties as merged in %s", project
+                )
                 return ChangeId(changeid.group(2), True)
 
             # Maybe this is a patch that has not merged yet
-            logger.info("did not find Change-Id from git.properties as merged in %s, trying as unmerged: %s",
-                        project, changeid.group(2))
-
-            gerrits = self.gerritquery.get_gerrits(project, changeid.group(2), 1, None, status=None, comments=True)
+            logger.info(
+                "did not find Change-Id from git.properties as merged in %s, trying as unmerged: %s",
+                project,
+                changeid.group(2),
+            )
+
+            gerrits = self.gerritquery.get_gerrits(
+                project, changeid.group(2), 1, None, status=None, comments=True
+            )
             if gerrits:
-                logger.info("found Change-Id from git.properties as unmerged in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties as unmerged in %s", project
+                )
                 return ChangeId(gerrits[0]["id"], False)
 
-        logger.info("did not find Change-Id from git.properties in %s, trying commitid", project)
+        logger.info(
+            "did not find Change-Id from git.properties in %s, trying commitid", project
+        )
 
         # match a git commit id
         commitid = self.regex_commitid.search(pfile)
         if commitid and commitid.group(2):
-            logger.info("trying commitid from git.properties in %s: %s", project, commitid.group(2))
+            logger.info(
+                "trying commitid from git.properties in %s: %s",
+                project,
+                commitid.group(2),
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, commitid=commitid.group(2))
             if gerrits:
-                logger.info("found Change-Id from git.properties as unmerged in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties as unmerged in %s", project
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
-        logger.info("did not find Change-Id from commitid from git.properties in %s, trying short commit message1",
-                    project)
+        logger.info(
+            "did not find Change-Id from commitid from git.properties in %s, trying short commit message1",
+            project,
+        )
 
         # Didn't find a Change-Id so try to get a commit message
         # match on "blah" but only keep the blah
         msg = self.regex_shortmsg1.search(pfile)
         if msg and msg.group(2):
             # logger.info("msg.groups 0: %s, 1: %s, 2: %s", msg.group(), msg.group(1), msg.group(2))
-            logger.info("trying with short commit-msg 1 from git.properties in %s: %s", project, msg.group(2))
+            logger.info(
+                "trying with short commit-msg 1 from git.properties in %s: %s",
+                project,
+                msg.group(2),
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, msg=msg.group(2))
             if gerrits:
-                logger.info("found Change-Id from git.properties short commit-msg 1 in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties short commit-msg 1 in %s",
+                    project,
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
             msg_no_spaces = msg.group(2).replace(" ", "+")
-            logger.info("did not find Change-Id in %s, trying with commit-msg 1 (no spaces): %s",
-                        project, msg_no_spaces)
+            logger.info(
+                "did not find Change-Id in %s, trying with commit-msg 1 (no spaces): %s",
+                project,
+                msg_no_spaces,
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, msg=msg_no_spaces)
             if gerrits:
-                logger.info("found Change-Id from git.properties short commit-msg 1 (no spaces) in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties short commit-msg 1 (no spaces) in %s",
+                    project,
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
-        logger.info("did not find Change-Id from short commit message1 from git.properties in %s", project)
+        logger.info(
+            "did not find Change-Id from short commit message1 from git.properties in %s",
+            project,
+        )
 
         # Didn't find a Change-Id so try to get a commit message
         # match on "blah" but only keep the blah
         msg = self.regex_shortmsg2.search(pfile)
         if msg and msg.group(2):
-            logger.info("trying with short commit-msg 2 from git.properties in %s: %s", project, msg.group(2))
+            logger.info(
+                "trying with short commit-msg 2 from git.properties in %s: %s",
+                project,
+                msg.group(2),
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, msg=msg.group(2))
             if gerrits:
-                logger.info("found Change-Id from git.properties short commit-msg 2 in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties short commit-msg 2 in %s",
+                    project,
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
             msg_no_spaces = msg.group(2).replace(" ", "+")
-            logger.info("did not find Change-Id in %s, trying with commit-msg 2 (no spaces): %s",
-                        project, msg_no_spaces)
+            logger.info(
+                "did not find Change-Id in %s, trying with commit-msg 2 (no spaces): %s",
+                project,
+                msg_no_spaces,
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, msg=msg_no_spaces)
             if gerrits:
-                logger.info("found Change-Id from git.properties short commit-msg 2 (no spaces) in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties short commit-msg 2 (no spaces) in %s",
+                    project,
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
-        logger.info("did not find Change-Id from short commit message2 from git.properties in %s", project)
+        logger.info(
+            "did not find Change-Id from short commit message2 from git.properties in %s",
+            project,
+        )
 
         # Maybe one of the monster 'merge the world' gerrits
         msg = self.regex_longmsg.search(pfile)
         first_msg = None
         if msg:
             lines = str(msg.group()).split("\\n")
-            cli = next((i for i, line in enumerate(lines[:-1]) if '* changes\\:' in line), None)
+            cli = next(
+                (i for i, line in enumerate(lines[:-1]) if "* changes\\:" in line), None
+            )
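+            # the line right after the "* changes:" marker holds the first real commit subject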
             first_msg = lines[cli + 1] if cli else None
         if first_msg:
-            logger.info("did not find Change-Id or short commit-msg in %s, trying with merge commit-msg: %s",
-                        project, first_msg)
+            logger.info(
+                "did not find Change-Id or short commit-msg in %s, trying with merge commit-msg: %s",
+                project,
+                first_msg,
+            )
             gerrits = self.gerritquery.get_gerrits(project, None, 1, first_msg)
             if gerrits:
-                logger.info("found Change-Id from git.properties merge commit-msg in %s", project)
+                logger.info(
+                    "found Change-Id from git.properties merge commit-msg in %s",
+                    project,
+                )
                 return ChangeId(gerrits[0]["id"], True)
 
         logger.warn("did not find Change-Id for %s" % project)
@@ -359,7 +485,9 @@ class Changes(object):
         :param str project: The project to search
         :return ChangeId: The Change-Id with a valid Change-Id or None if not found
         """
-        project_dir = os.path.join(self.distro_path, "system", "org", "opendaylight", project)
+        project_dir = os.path.join(
+            self.distro_path, "system", "org", "opendaylight", project
+        )
         pfile = None
         for root, dirs, files in os.walk(project_dir):
             for file_ in files:
@@ -371,7 +499,9 @@ class Changes(object):
                         if changeid.changeid:
                             return changeid
                         else:
-                            logger.warn("Could not find %s Change-Id in git.properties", project)
+                            logger.warn(
+                                "Could not find %s Change-Id in git.properties", project
+                            )
                             break  # all jars will have the same git.properties
             if pfile is not None:
                 break  # all jars will have the same git.properties
@@ -389,7 +519,7 @@ class Changes(object):
         taglist = None
         # Ensure the file exists and then read it
         if os.path.isfile(tagfile):
-            with open(tagfile, 'r') as fp:
+            with open(tagfile, "r") as fp:
                 taglist = fp.read()
         return taglist
 
@@ -402,32 +532,42 @@ class Changes(object):
         :return ChangeId: The Change-Id with a valid Change-Id or None if not found
         """
         # build the regex in two pieces: {} is a regex quantifier here but also the str.format() placeholder for project
-        re1 = r'({0} '.format(project)
-        re1 = re1 + r'(\b[a-f0-9]{40})\b|\b([a-f0-9]{8})\b' + r')'
+        re1 = r"({0} ".format(project)
+        re1 = re1 + r"(\b[a-f0-9]{40})\b|\b([a-f0-9]{8})\b" + r")"
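+        # the assembled pattern matches taglist.log entries like "<project> <40-hex commitid>"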
         commitid = re.search(re1, taglist)
         if commitid and commitid.group(2):
-            logger.info("trying commitid from taglist.log in %s: %s", project, commitid.group(2))
+            logger.info(
+                "trying commitid from taglist.log in %s: %s", project, commitid.group(2)
+            )
 
             gerrits = self.gerritquery.get_gerrits(project, commitid=commitid.group(2))
             if gerrits:
                 logger.info("found Change-Id from taglist.log as merged in %s", project)
                 return ChangeId(gerrits[0]["id"], True)
 
-        logger.warn("did not find Change-Id from commitid from taglist.log in %s", project)
+        logger.warn(
+            "did not find Change-Id from commitid from taglist.log in %s", project
+        )
         return ChangeId(None, False)
 
     def init(self):
-        self.gerritquery = gerritquery.GerritQuery(self.remote_url, self.branch, self.qlimit, self.verbose)
+        self.gerritquery = gerritquery.GerritQuery(
+            self.remote_url, self.branch, self.qlimit, self.verbose
+        )
         self.set_projects(self.project_names)
 
     def print_options(self):
-        print("Using these options: branch: %s, limit: %d, qlimit: %d"
-              % (self.branch, self.limit, self.qlimit))
+        print(
+            "Using these options: branch: %s, limit: %d, qlimit: %d"
+            % (self.branch, self.limit, self.qlimit)
+        )
         print("remote_url: %s" % self.remote_url)
         print("distro_path: %s" % self.distro_path)
         print("projects: %s" % (", ".join(map(str, self.projects))))
-        print("gerrit 00 is the most recent patch from which the project was built followed by the next most"
-              " recently merged patches up to %s." % self.limit)
+        print(
+            "gerrit 00 is the most recent patch from which the project was built followed by the next most"
+            " recently merged patches up to %s." % self.limit
+        )
 
     def run_cmd(self):
         """
@@ -446,52 +586,107 @@ class Changes(object):
         if self.distro_url is not None:
             self.download_distro()
 
-        logger.info("Checking if this is an autorelease build by looking for taglist.log")
+        logger.info(
+            "Checking if this is an autorelease build by looking for taglist.log"
+        )
         taglist = self.get_taglist()
         if taglist is not None:
             for project in sorted(self.projects):
                 logger.info("Processing %s using taglist.log", project)
                 changeid = self.find_project_commit_changeid(taglist, project)
                 if changeid.changeid:
-                    self.projects[project]['commit'] = changeid.changeid
-                    self.projects[project]["includes"] = \
-                        self.get_includes(project, changeid.changeid, msg=None, merged=changeid.merged)
+                    self.projects[project]["commit"] = changeid.changeid
+                    self.projects[project]["includes"] = self.get_includes(
+                        project, changeid.changeid, msg=None, merged=changeid.merged
+                    )
             return self.projects
 
-        logger.info("This is not an autorelease build, continuing as integration distribution")
+        logger.info(
+            "This is not an autorelease build, continuing as integration distribution"
+        )
         for project in sorted(self.projects):
             logger.info("Processing %s", project)
             changeid = self.find_distro_changeid(project)
             if changeid.changeid:
-                self.projects[project]['commit'] = changeid.changeid
-                self.projects[project]["includes"] =\
-                    self.get_includes(project, changeid.changeid, msg=None, merged=changeid.merged)
+                self.projects[project]["commit"] = changeid.changeid
+                self.projects[project]["includes"] = self.get_includes(
+                    project, changeid.changeid, msg=None, merged=changeid.merged
+                )
         return self.projects
 
     def main(self):
         parser = argparse.ArgumentParser(description=COPYRIGHT)
 
-        parser.add_argument("-b", "--branch", default=self.BRANCH,
-                            help="git branch for patch under test")
-        parser.add_argument("-d", "--distro-path", dest="distro_path", default=self.DISTRO_PATH,
-                            help="path to the expanded distribution, i.e. " + self.DISTRO_PATH)
-        parser.add_argument("-u", "--distro-url", dest="distro_url", default=self.DISTRO_URL,
-                            help="optional url to download a distribution " + str(self.DISTRO_URL))
-        parser.add_argument("-l", "--limit", dest="limit", type=int, default=self.LIMIT,
-                            help="number of gerrits to return")
-        parser.add_argument("-p", "--projects", dest="projects", default=self.PROJECT_NAMES,
-                            help="list of projects to include in output")
-        parser.add_argument("-q", "--query-limit", dest="qlimit", type=int, default=self.QUERY_LIMIT,
-                            help="number of gerrits to search")
-        parser.add_argument("-r", "--remote", dest="remote_url", default=self.REMOTE_URL,
-                            help="git remote url to use for gerrit")
-        parser.add_argument("-v", "--verbose", dest="verbose", action="count", default=self.VERBOSE,
-                            help="Output more information about what's going on")
-        parser.add_argument("--license", dest="license", action="store_true",
-                            help="Print the license and exit")
-        parser.add_argument("-V", "--version", action="version",
-                            version="%s version %s" %
-                                    (os.path.split(sys.argv[0])[-1], 0.1))
+        parser.add_argument(
+            "-b",
+            "--branch",
+            default=self.BRANCH,
+            help="git branch for patch under test",
+        )
+        parser.add_argument(
+            "-d",
+            "--distro-path",
+            dest="distro_path",
+            default=self.DISTRO_PATH,
+            help="path to the expanded distribution, i.e. " + self.DISTRO_PATH,
+        )
+        parser.add_argument(
+            "-u",
+            "--distro-url",
+            dest="distro_url",
+            default=self.DISTRO_URL,
+            help="optional url to download a distribution " + str(self.DISTRO_URL),
+        )
+        parser.add_argument(
+            "-l",
+            "--limit",
+            dest="limit",
+            type=int,
+            default=self.LIMIT,
+            help="number of gerrits to return",
+        )
+        parser.add_argument(
+            "-p",
+            "--projects",
+            dest="projects",
+            default=self.PROJECT_NAMES,
+            help="list of projects to include in output",
+        )
+        parser.add_argument(
+            "-q",
+            "--query-limit",
+            dest="qlimit",
+            type=int,
+            default=self.QUERY_LIMIT,
+            help="number of gerrits to search",
+        )
+        parser.add_argument(
+            "-r",
+            "--remote",
+            dest="remote_url",
+            default=self.REMOTE_URL,
+            help="git remote url to use for gerrit",
+        )
+        parser.add_argument(
+            "-v",
+            "--verbose",
+            dest="verbose",
+            action="count",
+            default=self.VERBOSE,
+            help="Output more information about what's going on",
+        )
+        parser.add_argument(
+            "--license",
+            dest="license",
+            action="store_true",
+            help="Print the license and exit",
+        )
+        parser.add_argument(
+            "-V",
+            "--version",
+            action="version",
+            version="%s version %s" % (os.path.split(sys.argv[0])[-1], 0.1),
+        )
 
         options = parser.parse_args()
 
@@ -507,7 +702,7 @@ class Changes(object):
         self.remote_url = options.remote_url
         self.verbose = options.verbose
         if options.projects != self.PROJECT_NAMES:
-            self.project_names = options.projects.split(',')
+            self.project_names = options.projects.split(",")
 
         # TODO: add check to verify that the remote can be reached,
         # though the first gerrit query will fail anyways
index a89e9d7c6d9e94389072f1e218b74f8272f31055..ce6f951cfe3fd0e4991f516f5620bcb37e1aa7cd 100644 (file)
@@ -6,7 +6,6 @@ from changes import Changes
 
 
 class DistCompare(object):
-
     def __init__(self, remote_url):
 
         self.remote_url = remote_url
@@ -26,28 +25,75 @@ class DistCompare(object):
         return projects
         """
         # this hard coded list of projects was taken from Oxygen dependencies.log - late January 2018
-        return ['integration/distribution', 'mdsal', 'alto', 'sfc', 'bier', 'serviceutils',
-                'usc', 'ovsdb', 'lispflowmapping', 'groupbasedpolicy', 'snmp4sdn', 'aaa',
-                'honeycomb/vbd', 'openflowplugin', 'of-config', 'daexim', 'dluxapps', 'coe',
-                'packetcable', 'genius', 'yangtools', 'infrautils', 'netvirt', 'neutron',
-                'snmp', 'bgpcep', 'nemo', 'netconf', 'tsdr', 'sxp', 'jsonrpc', 'p4plugin',
-                'odlparent', 'l2switch', 'dlux', 'controller']
+        return [
+            "integration/distribution",
+            "mdsal",
+            "alto",
+            "sfc",
+            "bier",
+            "serviceutils",
+            "usc",
+            "ovsdb",
+            "lispflowmapping",
+            "groupbasedpolicy",
+            "snmp4sdn",
+            "aaa",
+            "honeycomb/vbd",
+            "openflowplugin",
+            "of-config",
+            "daexim",
+            "dluxapps",
+            "coe",
+            "packetcable",
+            "genius",
+            "yangtools",
+            "infrautils",
+            "netvirt",
+            "neutron",
+            "snmp",
+            "bgpcep",
+            "nemo",
+            "netconf",
+            "tsdr",
+            "sxp",
+            "jsonrpc",
+            "p4plugin",
+            "odlparent",
+            "l2switch",
+            "dlux",
+            "controller",
+        ]
 
     def run_cmd(self):
         query_limit = 100
         num_to_display = 50
-        branch = 'master'
+        branch = "master"
         project_names = self.get_project_names()
-        extracted_distro_locations = {'new': '/tmp/distro_new', 'old': '/tmp/distro_old'}
-
-        new_changes = Changes(branch, extracted_distro_locations['new'], num_to_display,
-                              query_limit, project_names, self.remote_url)
+        extracted_distro_locations = {
+            "new": "/tmp/distro_new",
+            "old": "/tmp/distro_old",
+        }
+
+        new_changes = Changes(
+            branch,
+            extracted_distro_locations["new"],
+            num_to_display,
+            query_limit,
+            project_names,
+            self.remote_url,
+        )
 
         new_projects = new_changes.run_cmd()
         new_changes.pretty_print_projects(new_projects)
 
-        old_changes = Changes(branch, extracted_distro_locations['old'], num_to_display,
-                              query_limit, project_names, self.remote_url)
+        old_changes = Changes(
+            branch,
+            extracted_distro_locations["old"],
+            num_to_display,
+            query_limit,
+            project_names,
+            self.remote_url,
+        )
 
         old_projects = old_changes.run_cmd()
         old_changes.pretty_print_projects(old_projects)
@@ -55,23 +101,34 @@ class DistCompare(object):
         patchset_diff = []
         print("\nPatch differences:\n------------------")
         for project_name, values in new_projects.items():
-            new_gerrits = values['includes']
+            new_gerrits = values["includes"]
             for gerrit in new_gerrits:
-                if gerrit not in old_projects[project_name]['includes']:
+                if gerrit not in old_projects[project_name]["includes"]:
                     patchset_diff.append(gerrit)
-                    print('{:<20}{}\t{}'.format(project_name, gerrit['url'], gerrit['subject']))
+                    print(
+                        "{:<20}{}\t{}".format(
+                            project_name, gerrit["url"], gerrit["subject"]
+                        )
+                    )
 
         print("\n%s different patches between the two distros." % len(patchset_diff))
 
 
 def main():
 
-    parser = argparse.ArgumentParser(description='Returns the list of patches found in the unzipped distribution at '
-                                                 '/tmp/distro_new that are not found in the distribution at '
-                                                 '/tmp/distro_old. This should result in a listing of what new changes '
-                                                 'were made between the two distributions.')
-    parser.add_argument("-r", "--remote", dest="remote_url", default=Changes.REMOTE_URL,
-                        help="git remote url to use for gerrit")
+    parser = argparse.ArgumentParser(
+        description="Returns the list of patches found in the unzipped distribution at "
+        "/tmp/distro_new that are not found in the distribution at "
+        "/tmp/distro_old. This should result in a listing of what new changes "
+        "were made between the two distributions."
+    )
+    parser.add_argument(
+        "-r",
+        "--remote",
+        dest="remote_url",
+        default=Changes.REMOTE_URL,
+        help="git remote url to use for gerrit",
+    )
     options = parser.parse_args()
 
     distc = DistCompare(options.remote_url)
index 9f5b7f455ad30fd48d69c1eaf1d470bbcd84faa8..0dc09d5702dbda077658be1d5cb5836e01faa597 100644 (file)
@@ -12,7 +12,7 @@ import traceback
 import sys
 
 # TODO: Haven't tested python 3
-if sys.version < '3':
+if sys.version < "3":
     import urllib
     import urlparse
 
@@ -43,23 +43,26 @@ class CommandFailed(GitReviewException):
     def __init__(self, *args):
         Exception.__init__(self, *args)
         (self.rc, self.output, self.argv, self.envp) = args
-        self.quickmsg = dict([
-            ("argv", " ".join(self.argv)),
-            ("rc", self.rc),
-            ("output", self.output)])
+        self.quickmsg = dict(
+            [("argv", " ".join(self.argv)), ("rc", self.rc), ("output", self.output)]
+        )
 
     def __str__(self):
-        return self.__doc__ + """
+        return (
+            self.__doc__
+            + """
 The following command failed with exit code %(rc)d
     "%(argv)s"
 -----------------------
 %(output)s
------------------------""" % self.quickmsg
+-----------------------"""
+            % self.quickmsg
+        )
 
 
 class GerritQuery:
-    REMOTE_URL = 'ssh://git.opendaylight.org:29418'
-    BRANCH = 'master'
+    REMOTE_URL = "ssh://git.opendaylight.org:29418"
+    BRANCH = "master"
     QUERY_LIMIT = 50
 
     remote_url = REMOTE_URL
@@ -76,10 +79,10 @@ class GerritQuery:
     def print_safe_encoding(string):
         try:
             if type(string) == unicode:
-                encoding = 'utf-8'
-                if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+                encoding = "utf-8"
+                if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
                     encoding = sys.stdout.encoding
-                return string.encode(encoding or 'utf-8', 'replace')
+                return string.encode(encoding or "utf-8", "replace")
             else:
                 return str(string)
         except Exception:
@@ -90,21 +93,23 @@ class GerritQuery:
         if len(argv) == 1:
             # for python2 compatibility with shlex
             if sys.version_info < (3,) and isinstance(argv[0], unicode):
-                argv = shlex.split(argv[0].encode('utf-8'))
+                argv = shlex.split(argv[0].encode("utf-8"))
             else:
                 argv = shlex.split(str(argv[0]))
-        stdin = kwargs.pop('stdin', None)
+        stdin = kwargs.pop("stdin", None)
         newenv = os.environ.copy()
-        newenv['LANG'] = 'C'
-        newenv['LANGUAGE'] = 'C'
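+        # pin the C locale so subprocess output stays predictable and parseable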
+        newenv["LANG"] = "C"
+        newenv["LANGUAGE"] = "C"
         newenv.update(kwargs)
-        p = subprocess.Popen(argv,
-                             stdin=subprocess.PIPE if stdin else None,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT,
-                             env=newenv)
+        p = subprocess.Popen(
+            argv,
+            stdin=subprocess.PIPE if stdin else None,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            env=newenv,
+        )
         (out, nothing) = p.communicate(stdin)
-        out = out.decode('utf-8', 'replace')
+        out = out.decode("utf-8", "replace")
         return p.returncode, out.strip()
 
     def run_command(self, *argv, **kwargs):
@@ -174,8 +179,12 @@ class GerritQuery:
         :param str request: A gerrit query
         :return unicode: The JSON response
         """
-        (hostname, username, port, project_name) = \
-            self.parse_gerrit_ssh_params_from_git_url()
+        (
+            hostname,
+            username,
+            port,
+            project_name,
+        ) = self.parse_gerrit_ssh_params_from_git_url()
 
         port_data = "p%s" % port if port is not None else ""
         if username is None:
@@ -184,12 +193,23 @@ class GerritQuery:
             userhost = "%s@%s" % (username, hostname)
 
         logger.debug("gerrit request %s %s" % (self.remote_url, request))
-        output = self.run_command_exc(CommandFailed, "ssh", "-x" + port_data, userhost, request)
+        output = self.run_command_exc(
+            CommandFailed, "ssh", "-x" + port_data, userhost, request
+        )
         if logger.isEnabledFor(logging.DEBUG):
             logger.debug("%s", self.print_safe_encoding(output))
         return output
 
-    def make_gerrit_query(self, project, changeid=None, limit=1, msg=None, status=None, comments=False, commitid=None):
+    def make_gerrit_query(
+        self,
+        project,
+        changeid=None,
+        limit=1,
+        msg=None,
+        status=None,
+        comments=False,
+        commitid=None,
+    ):
         """
         Make a gerrit query by combining the given options.
 
@@ -204,13 +224,16 @@ class GerritQuery:
         """
 
         if project == "odlparent" or project == "yangtools":
-            query = "gerrit query --format=json limit:%d " \
-                    "project:%s" \
-                    % (limit, project)
+            query = "gerrit query --format=json limit:%d " "project:%s" % (
+                limit,
+                project,
+            )
         else:
-            query = "gerrit query --format=json limit:%d " \
-                    "project:%s branch:%s" \
-                    % (limit, project, self.branch)
+            query = "gerrit query --format=json limit:%d " "project:%s branch:%s" % (
+                limit,
+                project,
+                self.branch,
+            )
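+        # base query so far, e.g. "gerrit query --format=json limit:50 project:netvirt branch:master"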
         if changeid:
             query += " change:%s" % changeid
         if msg:
@@ -239,31 +262,37 @@ class GerritQuery:
             if line and line[0] == "{":
                 try:
                     data = json.loads(line)
-                    parsed['id'] = data['id']
-                    parsed['number'] = data['number']
-                    parsed['subject'] = data['subject']
-                    parsed['url'] = data['url']
-                    parsed['lastUpdated'] = data['lastUpdated']
-                    parsed['grantedOn'] = 0
+                    parsed["id"] = data["id"]
+                    parsed["number"] = data["number"]
+                    parsed["subject"] = data["subject"]
+                    parsed["url"] = data["url"]
+                    parsed["lastUpdated"] = data["lastUpdated"]
+                    parsed["grantedOn"] = 0
                     if "patchSets" in data:
-                        patch_sets = data['patchSets']
+                        patch_sets = data["patchSets"]
                         for patch_set in reversed(patch_sets):
                             if "approvals" in patch_set:
-                                approvals = patch_set['approvals']
+                                approvals = patch_set["approvals"]
                                 for approval in approvals:
-                                    if 'type' in approval and approval['type'] == 'SUBM':
-                                        parsed['grantedOn'] = approval['grantedOn']
+                                    if (
+                                        "type" in approval
+                                        and approval["type"] == "SUBM"
+                                    ):
+                                        parsed["grantedOn"] = approval["grantedOn"]
                                         break
-                                if parsed['grantedOn'] != 0:
+                                if parsed["grantedOn"] != 0:
                                     break
                     if "comments" in data:
-                        comments = data['comments']
+                        comments = data["comments"]
                         for comment in reversed(comments):
                             if "message" in comment and "timestamp" in comment:
-                                message = comment['message']
-                                timestamp = comment['timestamp']
-                                if "Build Started" in message and "patch-test" in message:
-                                    parsed['grantedOn'] = timestamp
+                                message = comment["message"]
+                                timestamp = comment["timestamp"]
+                                if (
+                                    "Build Started" in message
+                                    and "patch-test" in message
+                                ):
+                                    parsed["grantedOn"] = timestamp
                                     break
                 except Exception:
                     logger.warn("Failed to decode JSON: %s", traceback.format_exc())
@@ -291,10 +320,21 @@ class GerritQuery:
             else:
                 logger.debug("skipping: {}".format(line))
                 skipped += 1
-        logger.debug("get_gerrit_lines: found {} lines, skipped: {}".format(len(lines), skipped))
+        logger.debug(
+            "get_gerrit_lines: found {} lines, skipped: {}".format(len(lines), skipped)
+        )
         return lines
 
-    def get_gerrits(self, project, changeid=None, limit=1, msg=None, status=None, comments=False, commitid=None):
+    def get_gerrits(
+        self,
+        project,
+        changeid=None,
+        limit=1,
+        msg=None,
+        status=None,
+        comments=False,
+        commitid=None,
+    ):
         """
         Get a list of gerrits from gerrit query request.
 
@@ -313,10 +353,20 @@ class GerritQuery:
         :param commitid: A commit hash to search
         :return str: List of gerrits sorted by merge time
         """
-        logger.debug("get_gerrits: project: %s, changeid: %s, limit: %d, msg: %s, status: %s, comments: %s, " +
-                     "commitid: %s",
-                     project, changeid, limit, msg, status, comments, commitid)
-        query = self.make_gerrit_query(project, changeid, limit, msg, status, comments, commitid)
+        logger.debug(
+            "get_gerrits: project: %s, changeid: %s, limit: %d, msg: %s, status: %s, comments: %s, "
+            + "commitid: %s",
+            project,
+            changeid,
+            limit,
+            msg,
+            status,
+            comments,
+            commitid,
+        )
+        query = self.make_gerrit_query(
+            project, changeid, limit, msg, status, comments, commitid
+        )
         changes = self.gerrit_request(query)
         lines = self.extract_lines_from_json(changes)
         gerrits = []
@@ -325,11 +375,12 @@ class GerritQuery:
             gerrits.append(self.parse_gerrit(line))
 
         from operator import itemgetter
+
         if gerrits is None:
             logger.warn("No gerrits were found for %s", project)
             return gerrits
         try:
-            sorted_gerrits = sorted(gerrits, key=itemgetter('grantedOn'), reverse=True)
+            sorted_gerrits = sorted(gerrits, key=itemgetter("grantedOn"), reverse=True)
         except KeyError as e:
             logger.warn("KeyError exception in %s, %s", project, str(e))
         return sorted_gerrits
index b6762c8db1b31dfe3be6ed547d442dca33e51915..3548d43ec4eeeffd20bc239b61cb9f1623c646b0 100644 (file)
@@ -23,7 +23,9 @@ class Logger:
         global fh
 
         logger = logging.getLogger()
-        formatter = logging.Formatter('%(asctime)s | %(levelname).3s | %(name)-20s | %(lineno)04d | %(message)s')
+        formatter = logging.Formatter(
+            "%(asctime)s | %(levelname).3s | %(name)-20s | %(lineno)04d | %(message)s"
+        )
         ch = logging.StreamHandler()
         ch.setLevel(console_level)
         ch.setFormatter(formatter)
index 6d65e20cd8ec126a1b19cd6f61f851bc2e374d5c..df2208a128f726795498d83c824fadb64023a258 100644 (file)
@@ -7,50 +7,83 @@ import unittest
 import distcompare
 from changes import Changes
 
-REMOTE_URL = 'ssh://git.opendaylight.org:29418'
-NETVIRT_PROJECTS = ["controller", "dlux", "dluxapps", "genius", "infrautils", "mdsal", "netconf", "netvirt",
-                    "neutron", "odlparent", "openflowplugin", "ovsdb", "sfc", "yangtools"]
+REMOTE_URL = "ssh://git.opendaylight.org:29418"
+NETVIRT_PROJECTS = [
+    "controller",
+    "dlux",
+    "dluxapps",
+    "genius",
+    "infrautils",
+    "mdsal",
+    "netconf",
+    "netvirt",
+    "neutron",
+    "odlparent",
+    "openflowplugin",
+    "ovsdb",
+    "sfc",
+    "yangtools",
+]
 PROJECT_NAMES = NETVIRT_PROJECTS
 DISTRO_PATH = "/tmp/distribution-karaf"
-BRANCH = 'master'
+BRANCH = "master"
 LIMIT = 10
 QLIMIT = 50
 
 
 class TestChanges(unittest.TestCase):
-
     def setUp(self):
         print("Starting test: %s" % self.id())
 
     @staticmethod
-    def run_cmd(branch, distro_patch, limit, qlimit, project_names, remote_url, loglevel=0):
-        changes = Changes(branch, distro_patch, limit, qlimit, project_names, remote_url, loglevel)
+    def run_cmd(
+        branch, distro_patch, limit, qlimit, project_names, remote_url, loglevel=0
+    ):
+        changes = Changes(
+            branch, distro_patch, limit, qlimit, project_names, remote_url, loglevel
+        )
         projects = changes.run_cmd()
         changes.pretty_print_projects(projects)
 
     def test_run_cmd_single(self):
-        project_names = ['netvirt']
+        project_names = ["netvirt"]
         branch = BRANCH
-        self.run_cmd(branch, DISTRO_PATH, LIMIT, QLIMIT, project_names, REMOTE_URL, logging.INFO)
+        self.run_cmd(
+            branch, DISTRO_PATH, LIMIT, QLIMIT, project_names, REMOTE_URL, logging.INFO
+        )
 
     def test_run_cmd_multiple(self):
         project_names = PROJECT_NAMES
         branch = BRANCH
-        self.run_cmd(branch, DISTRO_PATH, LIMIT, QLIMIT, project_names, REMOTE_URL, logging.INFO)
+        self.run_cmd(
+            branch, DISTRO_PATH, LIMIT, QLIMIT, project_names, REMOTE_URL, logging.INFO
+        )
 
     def test_pretty_print(self):
         project_names = PROJECT_NAMES
         changes = Changes(BRANCH, DISTRO_PATH, LIMIT, QLIMIT, project_names, REMOTE_URL)
         projects = {}
         for project in project_names:
-            projects[project] = {"commit": 1, "includes": [{'a': 1}]}
+            projects[project] = {"commit": 1, "includes": [{"a": 1}]}
         changes.pretty_print_projects(projects)
         for project in project_names:
-            projects[project] = {"commit": 1,
-                                 "includes": [{"grantedOn": 1, "lastUpdated": 11,
-                                               "number": "12345", "subject": "This is a test for " + project},
-                                              {"grantedOn": 2, "lastUpdated": 22,
-                                               "number": "56789", "subject": "This is a test for " + project}]}
+            projects[project] = {
+                "commit": 1,
+                "includes": [
+                    {
+                        "grantedOn": 1,
+                        "lastUpdated": 11,
+                        "number": "12345",
+                        "subject": "This is a test for " + project,
+                    },
+                    {
+                        "grantedOn": 2,
+                        "lastUpdated": 22,
+                        "number": "56789",
+                        "subject": "This is a test for " + project,
+                    },
+                ],
+            }
         changes.pretty_print_projects(projects)
 
     def test_epoch_to_utc(self):
@@ -62,5 +95,5 @@ class TestChanges(unittest.TestCase):
         distcompare.main()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
index 86daf3ae230e9f3852d854c528bd9873a9c9992e..fc25dbce982f882ac7188d31108c19cc29c7bb0c 100644 (file)
@@ -11,7 +11,7 @@ import logg
 
 
 REMOTE_URL = GerritQuery.remote_url
-BRANCH = 'stable/oxygen'
+BRANCH = "stable/oxygen"
 LIMIT = 10
 QLIMIT = 50
 VERBOSE = 0
@@ -29,5 +29,5 @@ class TestRequest(unittest.TestCase):
         print("{}".format(gerrits))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
index 1e9775236828a136da594865270994a88be9565f..aa35ec534a3b26661ddfd772d84411930abed2ef 100644 (file)
@@ -57,11 +57,11 @@ class Rpcs(object):
         Arguments:
             :text: exabgp command
         """
-        logging.debug('Command towards exabgp: {}'.format(text))
+        logging.debug("Command towards exabgp: {}".format(text))
         sys.stdout.write(text)
         sys.stdout.write("\n")
         sys.stdout.flush()
-        logging.debug('Connand flushed: {}.'.format(text))
+        logging.debug("Connand flushed: {}.".format(text))
 
     def get_counter(self, msg_type):
         """Gets counter value
@@ -71,11 +71,11 @@ class Rpcs(object):
         Returns:
             :cnt: counter value
         """
-        logging.debug('get_counter rpc called, storage {}'.format(self.storage))
+        logging.debug("get_counter rpc called, storage {}".format(self.storage))
         with self.storage as s:
-            if 'counters' not in s:
+            if "counters" not in s:
                 return 0
-            cnt = 0 if msg_type not in s['counters'] else s['counters'][msg_type]
+            cnt = 0 if msg_type not in s["counters"] else s["counters"][msg_type]
         return cnt
 
     def clean_counter(self, msg_type):
@@ -85,12 +85,12 @@ class Rpcs(object):
             :msg_type: message type which counter should be cleaned
         """
 
-        logging.debug('clean_counter rpc called, storage {}'.format(self.storage))
+        logging.debug("clean_counter rpc called, storage {}".format(self.storage))
         with self.storage as s:
-            if 'counters' not in s:
+            if "counters" not in s:
                 return
-            if msg_type in s['counters']:
-                del s['counters'][msg_type]
+            if msg_type in s["counters"]:
+                del s["counters"][msg_type]
 
     def get_message(self, msg_type):
         """Gets last received message
@@ -100,11 +100,13 @@ class Rpcs(object):
         Returns:
             :msg: message
         """
-        logging.debug('get_message {} rpc called, storage {}'.format(msg_type, self.storage))
+        logging.debug(
+            "get_message {} rpc called, storage {}".format(msg_type, self.storage)
+        )
         with self.storage as s:
-            if 'messages' not in s:
+            if "messages" not in s:
                 return None
-            msg = None if msg_type not in s['messages'] else s['messages'][msg_type]
+            msg = None if msg_type not in s["messages"] else s["messages"][msg_type]
         return msg
 
     def clean_message(self, msg_type):
@@ -114,12 +116,12 @@ class Rpcs(object):
             :msg_type: message type whose message should be cleaned
         """
 
-        logging.debug('clean_message rpc called, storage {}'.format(self.storage))
+        logging.debug("clean_message rpc called, storage {}".format(self.storage))
         with self.storage as s:
-            if 'messages' not in s:
+            if "messages" not in s:
                 return
-            if msg_type in s['messages']:
-                del s['messages'][msg_type]
+            if msg_type in s["messages"]:
+                del s["messages"][msg_type]
         return
 
     def execute(self, exabgp_cmd):
@@ -128,7 +130,7 @@ class Rpcs(object):
         Arguments:
             :exabgp_cmd: command
         """
-        logging.info('executing: {}.'.format(exabgp_cmd))
+        logging.info("executing: {}.".format(exabgp_cmd))
         self._write(exabgp_cmd)
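
The Rpcs methods above are what test suites drive remotely over XML-RPC. A
minimal client sketch, assuming the instance is registered on a
SimpleXMLRPCServer (the URL, and port 8000 mirroring the --port default of the
peer script later in this patch, are otherwise assumptions):

    # Hypothetical client; the server URL is an assumption, not part of this patch.
    import xmlrpclib  # Python 2, matching these scripts

    rpc = xmlrpclib.ServerProxy("http://127.0.0.1:8000")
    print(rpc.get_counter("update"))   # filled in by _increment_counter()
    print(rpc.get_message("update"))   # stored by handle_json_update()
    rpc.clean_counter("update")        # reset between test steps
    rpc.execute("announce route 10.0.0.1/32 next-hop 10.0.0.2")  # relayed to exabgp stdin
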
 
 
@@ -157,12 +159,12 @@ def _increment_counter(storage, key):
         :key: message type
     """
     with storage as s:
-        if 'counters' not in s:
-            s['counters'] = {}
-        if key not in s['counters']:
-            s['counters'][key] = 1
+        if "counters" not in s:
+            s["counters"] = {}
+        if key not in s["counters"]:
+            s["counters"][key] = 1
         else:
-            s['counters'][key] += 1
+            s["counters"][key] += 1
 
 
 def _store_last_received_message(storage, key, msg):
@@ -172,9 +174,9 @@ def _store_last_received_message(storage, key, msg):
         :key: message type
     """
     with storage as s:
-        if 'messages' not in s:
-            s['messages'] = {}
-        s['messages'][key] = msg
+        if "messages" not in s:
+            s["messages"] = {}
+        s["messages"][key] = msg
 
 
 def handle_open(storage, msg):
@@ -185,8 +187,8 @@ def handle_open(storage, msg):
     Arguments:
         :msg: hex string of open body
     """
-    logging.debug('Handling Open with storage {}'.format(storage))
-    _increment_counter(storage, 'open')
+    logging.debug("Handling Open with storage {}".format(storage))
+    _increment_counter(storage, "open")
 
 
 def handle_keepalive(storage, msg):
@@ -197,8 +199,8 @@ def handle_keepalive(storage, msg):
     Arguments:
         :msg: hex string of message body (in fact it is None)
     """
-    logging.debug('Handling KeepAlive with storage {}'.format(storage))
-    _increment_counter(storage, 'keepalive')
+    logging.debug("Handling KeepAlive with storage {}".format(storage))
+    _increment_counter(storage, "keepalive")
 
 
 def handle_update(storage, msg):
@@ -209,8 +211,8 @@ def handle_update(storage, msg):
     Arguments:
         :msg: hex string of update body
     """
-    logging.debug('Handling Update with storage {}'.format(storage))
-    _increment_counter(storage, 'update')
+    logging.debug("Handling Update with storage {}".format(storage))
+    _increment_counter(storage, "update")
 
 
 def handle_route_refresh(storage, msg):
@@ -221,8 +223,8 @@ def handle_route_refresh(storage, msg):
     Arguments:
         :msg: hex string of route refresh body
     """
-    logging.debug('Handling Route Refresh with storage {}'.format(storage))
-    _increment_counter(storage, 'route_refresh')
+    logging.debug("Handling Route Refresh with storage {}".format(storage))
+    _increment_counter(storage, "route_refresh")
 
 
 def handle_json_update(storage, jdata):
@@ -233,9 +235,9 @@ def handle_json_update(storage, jdata):
     Arguments:
         :jdata: json formatted data of update message
     """
-    logging.debug('Handling Json Update with storage {}'.format(storage))
-    _increment_counter(storage, 'update')
-    _store_last_received_message(storage, 'update', jdata)
+    logging.debug("Handling Json Update with storage {}".format(storage))
+    _increment_counter(storage, "update")
+    _store_last_received_message(storage, "update", jdata)
 
 
 def handle_json_state(storage, jdata):
@@ -247,7 +249,7 @@ def handle_json_state(storage, jdata):
     Arguments:
         :jdata: json formatted data about connection/peer state
     """
-    logging.debug('Handling Json State with storage {}'.format(storage))
+    logging.debug("Handling Json State with storage {}".format(storage))
 
 
 def handle_json_refresh(storage, jdata):
@@ -259,25 +261,31 @@ def handle_json_refresh(storage, jdata):
     Arguments:
         :jdata: json formatted data about connection/peer state
     """
-    logging.debug('Handling Json State with storage {}'.format(storage))
-    _increment_counter(storage, 'route_refresh')
+    logging.debug("Handling Json State with storage {}".format(storage))
+    _increment_counter(storage, "route_refresh")
 
 
 def exa_msg_handler(storage, data, encoder):
     """Handles incomming messages"""
 
-    if encoder == 'text':
-        if not ('neighbor' in data and 'header' in data and 'body' in data):
-            logging.debug('Ignoring received notification from exabgp: {}'.format(data))
+    if encoder == "text":
+        if not ("neighbor" in data and "header" in data and "body" in data):
+            logging.debug("Ignoring received notification from exabgp: {}".format(data))
             return
-        restr = 'neighbor (?P<ip>[0-9,\\.]+) received (?P<mid>[0-9]+) header\
- (?P<header>[0-9,A-F]+) body.?(?P<body>[0-9,A-F]+)?'
+        restr = "neighbor (?P<ip>[0-9,\\.]+) received (?P<mid>[0-9]+) header\
+ (?P<header>[0-9,A-F]+) body.?(?P<body>[0-9,A-F]+)?"
         pat = re.compile(restr)
         match = re.search(pat, data)
         if match is None:
-            logging.warn('Unexpected data in this part, only bgp message expected. Received: {}.'.format(data))
+            logging.warn(
+                "Unexpected data in this part, only bgp message expected. Received: {}.".format(
+                    data
+                )
+            )
             return
-        msg_type, msg = decode_message(match.groupdict()['header'], match.groupdict()['body'])
+        msg_type, msg = decode_message(
+            match.groupdict()["header"], match.groupdict()["body"]
+        )
         if msg_type == Message.CODE.KEEPALIVE:
             handle_keepalive(storage, msg)
         elif msg_type == Message.CODE.OPEN:
@@ -287,28 +295,28 @@ def exa_msg_handler(storage, data, encoder):
         elif msg_type == Message.CODE.ROUTE_REFRESH:
             handle_route_refresh(storage, msg)
         else:
-            logging.warn('No handler function for msg_type: {}'.format(msg_type))
-    elif encoder == 'json':
+            logging.warn("No handler function for msg_type: {}".format(msg_type))
+    elif encoder == "json":
         try:
             jdata = json.loads(data)
         except Exception:
-            logging.error('Unable to parse, expected json, received: {}.'.format(data))
+            logging.error("Unable to parse, expected json, received: {}.".format(data))
             return
-        if jdata['type'] == 'state':
-            logging.debug('State info received: {}.'.format(data))
+        if jdata["type"] == "state":
+            logging.debug("State info received: {}.".format(data))
             handle_json_state(storage, jdata)
-        elif jdata['type'] == 'update':
-            logging.debug('Update info received: {}.'.format(data))
+        elif jdata["type"] == "update":
+            logging.debug("Update info received: {}.".format(data))
             handle_json_update(storage, jdata)
-        elif jdata['type'] == 'notification':
-            logging.debug('Notification info received: {}.'.format(data))
-        elif jdata['type'] == 'refresh':
-            logging.debug('Route refresh received: {}.'.format(data))
+        elif jdata["type"] == "notification":
+            logging.debug("Notification info received: {}.".format(data))
+        elif jdata["type"] == "refresh":
+            logging.debug("Route refresh received: {}.".format(data))
             handle_json_refresh(storage, jdata)
         else:
-            logging.error('Unexpected type for data: {}'.format(data))
+            logging.error("Unexpected type for data: {}".format(data))
     else:
-        logging.error('Ignoring received data, unknown encoder: {}'.format(encoder))
+        logging.error("Ignoring received data, unknown encoder: {}".format(encoder))
 
 
 def main(*argv):
@@ -321,12 +329,19 @@ def main(*argv):
     Stdin and stdout are used for communication with exabgp.
     """
 
-    parser = argparse.ArgumentParser(description='ExaBgp rpc server script')
-    parser.add_argument('--host', default='127.0.0.1', help='Host where exabgp is running (default is 127.0.0.1)')
-    parser.add_argument('--loglevel', default=logging.DEBUG, help='Log level')
-    parser.add_argument('--logfile', default='{}/exarpc.log'.format(os.path.dirname(os.path.abspath(__file__))),
-                        help='Log file name.')
-    parser.add_argument('--encoder', default='json', help='Exabgp encoder type')
+    parser = argparse.ArgumentParser(description="ExaBgp rpc server script")
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where exabgp is running (default is 127.0.0.1)",
+    )
+    parser.add_argument("--loglevel", default=logging.DEBUG, help="Log level")
+    parser.add_argument(
+        "--logfile",
+        default="{}/exarpc.log".format(os.path.dirname(os.path.abspath(__file__))),
+        help="Log file name.",
+    )
+    parser.add_argument("--encoder", default="json", help="Exabgp encoder type")
     in_args = parser.parse_args(*argv)
     logging.basicConfig(filename=in_args.logfile, level=in_args.loglevel)
 
@@ -342,22 +357,22 @@ def main(*argv):
 
     try:
         while True:
-            logging.debug('Epoll loop')
+            logging.debug("Epoll loop")
             events = epoll.poll(10)
             for fd, event_type in events:
-                logging.debug('Epoll returned: {},{}'.format(fd, event_type))
+                logging.debug("Epoll returned: {},{}".format(fd, event_type))
                 if event_type != select.EPOLLIN:
-                    raise Exception('Unexpected epoll event')
+                    raise Exception("Unexpected epoll event")
                 else:
                     data = sys.stdin.readline()
-                    logging.debug('Data recevied from exabgp: {}.'.format(data))
+                    logging.debug("Data recevied from exabgp: {}.".format(data))
                     exa_msg_handler(storage, data, in_args.encoder)
     except Exception as e:
-        logging.warn('Exception occured: {}'.format(e))
+        logging.warn("Exception occured: {}".format(e))
     finally:
         rpcserver.shutdown()
         trpc.join()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index 16f658f834a47fdd496cbfb5f5b2974704ff4859..b3dfbfd66c60f7d775c03cfe460d0feb5b00aa81 100755 (executable)
@@ -39,7 +39,9 @@ def _build_url(odl_ip, port, uri):
     return url
 
 
-def _stream_data(xml_template, prefix_base, prefix_len, count, route_key=False, element="ipv4-routes"):
+def _stream_data(
+    xml_template, prefix_base, prefix_len, count, route_key=False, element="ipv4-routes"
+):
     """Stream list of routes based on xml template. Memory non-consumable
     data generation (on the fly).
 
@@ -108,7 +110,9 @@ def _stream_data(xml_template, prefix_base, prefix_len, count, route_key=False,
         yield xml_data
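
The yield above is the whole memory story: each XML fragment is produced on
demand and handed straight to the HTTP layer. The same pattern in miniature
(names are illustrative, not this file's API):

    def stream_fragments(template, count):
        # Nothing is accumulated, so a million routes cost
        # the same memory as one.
        for i in range(count):
            yield template.format(index=i)

    # requests accepts such a generator as a chunked request body, e.g.
    # requests.post(url, data=stream_fragments(tmpl, 1000000), auth=auth)
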
 
 
-def send_request(operation, odl_ip, port, uri, auth, xml_data=None, expect_status_code=200):
+def send_request(
+    operation, odl_ip, port, uri, auth, xml_data=None, expect_status_code=200
+):
     """Send a http request.
 
     Args:
@@ -157,8 +161,16 @@ def send_request(operation, odl_ip, port, uri, auth, xml_data=None, expect_statu
         return rsp
 
 
-def get_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
-                 count=None, xml_template=None):
+def get_prefixes(
+    odl_ip,
+    port,
+    uri,
+    auth,
+    prefix_base=None,
+    prefix_len=None,
+    count=None,
+    xml_template=None,
+):
     """Send a http GET request for getting all prefixes.
 
     Args:
@@ -190,19 +202,28 @@ def get_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
         s = s.replace("}", "")
         s = s.replace("[", "")
         s = s.replace("]", "")
-        prefixes = ''
+        prefixes = ""
         prefix_count = 0
         for item in s.split(","):
             if "prefix" in item:
                 prefixes += item + ","
                 prefix_count += 1
-        prefixes = prefixes[:len(prefixes) - 1]
+        prefixes = prefixes[: len(prefixes) - 1]
         logger.debug("prefix_list=%s", prefixes)
         logger.info("prefix_count=%s", prefix_count)
 
 
-def post_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
-                  count=0, route_key=False, xml_template=None):
+def post_prefixes(
+    odl_ip,
+    port,
+    uri,
+    auth,
+    prefix_base=None,
+    prefix_len=None,
+    count=0,
+    route_key=False,
+    xml_template=None,
+):
     """Send a http POST request for creating a prefix list.
 
     Args:
@@ -227,14 +248,32 @@ def post_prefixes(odl_ip, port, uri, auth, prefix_base=None, prefix_len=None,
     Returns:
         :returns None
     """
-    logger.info("Post %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
-                count, prefix_base, prefix_len, odl_ip, port, uri)
+    logger.info(
+        "Post %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
+        count,
+        prefix_base,
+        prefix_len,
+        odl_ip,
+        port,
+        uri,
+    )
     xml_stream = _stream_data(xml_template, prefix_base, prefix_len, count, route_key)
-    send_request("POST", odl_ip, port, uri, auth, xml_data=xml_stream, expect_status_code=204)
-
-
-def put_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                 route_key, xml_template=None):
+    send_request(
+        "POST", odl_ip, port, uri, auth, xml_data=xml_stream, expect_status_code=204
+    )
+
+
+def put_prefixes(
+    odl_ip,
+    port,
+    uri,
+    auth,
+    prefix_base,
+    prefix_len,
+    count,
+    route_key,
+    xml_template=None,
+):
     """Send a http PUT request for updating the prefix list.
 
     Args:
@@ -258,14 +297,30 @@ def put_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
         :returns None
     """
     uri_add_prefix = uri + _uri_suffix_ipv4_routes
-    logger.info("Put %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
-                count, prefix_base, prefix_len, odl_ip, port, uri_add_prefix)
+    logger.info(
+        "Put %s prefix(es) in a single request (starting from %s/%s) into %s:%s/restconf/%s",
+        count,
+        prefix_base,
+        prefix_len,
+        odl_ip,
+        port,
+        uri_add_prefix,
+    )
     xml_stream = _stream_data(xml_template, prefix_base, prefix_len, count, route_key)
     send_request("PUT", odl_ip, port, uri_add_prefix, auth, xml_data=xml_stream)
 
 
-def add_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                 route_key, xml_template=None):
+def add_prefixes(
+    odl_ip,
+    port,
+    uri,
+    auth,
+    prefix_base,
+    prefix_len,
+    count,
+    route_key,
+    xml_template=None,
+):
     """Send a consequent http POST request for adding prefixes.
 
     Args:
@@ -288,22 +343,44 @@ def add_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
     Returns:
         :returns None
     """
-    logger.info("Add %s prefixes (starting from %s/%s) into %s:%s/restconf/%s",
-                count, prefix_base, prefix_len, odl_ip, port, uri)
+    logger.info(
+        "Add %s prefixes (starting from %s/%s) into %s:%s/restconf/%s",
+        count,
+        prefix_base,
+        prefix_len,
+        odl_ip,
+        port,
+        uri,
+    )
     uri_add_prefix = uri + _uri_suffix_ipv4_routes
     prefix_gap = 2 ** (32 - prefix_len)
     for prefix_index in range(count):
         prefix = prefix_base + prefix_index * prefix_gap
-        logger.info("Adding prefix %s/%s to %s:%s/restconf/%s",
-                    prefix, prefix_len, odl_ip, port, uri)
-        xml_stream = _stream_data(xml_template, prefix, prefix_len, 1, route_key,
-                                  element="ipv4-route")
-        send_request("POST", odl_ip, port, uri_add_prefix, auth,
-                     xml_data=xml_stream, expect_status_code=204)
-
-
-def delete_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                    xml_template=None):
+        logger.info(
+            "Adding prefix %s/%s to %s:%s/restconf/%s",
+            prefix,
+            prefix_len,
+            odl_ip,
+            port,
+            uri,
+        )
+        xml_stream = _stream_data(
+            xml_template, prefix, prefix_len, 1, route_key, element="ipv4-route"
+        )
+        send_request(
+            "POST",
+            odl_ip,
+            port,
+            uri_add_prefix,
+            auth,
+            xml_data=xml_stream,
+            expect_status_code=204,
+        )
+
+
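
The prefix_gap arithmetic in add_prefixes above steps the base address by one
whole prefix per iteration; worked through with the script defaults:

    # prefix_len = 28  ->  prefix_gap = 2 ** (32 - 28) = 16 addresses
    # prefix_base + prefix_index * prefix_gap then walks
    #   8.0.1.0/28, 8.0.1.16/28, 8.0.1.32/28, ...
    # (ipaddr.IPv4Address arithmetic keeps each result an address object)
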
+def delete_prefixes(
+    odl_ip, port, uri, auth, prefix_base, prefix_len, count, xml_template=None
+):
     """Send a http DELETE requests for deleting prefixes.
 
     Args:
@@ -326,21 +403,48 @@ def delete_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
     Returns:
         :returns None
     """
-    logger.info("Delete %s prefix(es) (starting from %s/%s) from %s:%s/restconf/%s",
-                count, prefix_base, prefix_len, odl_ip, port, uri)
+    logger.info(
+        "Delete %s prefix(es) (starting from %s/%s) from %s:%s/restconf/%s",
+        count,
+        prefix_base,
+        prefix_len,
+        odl_ip,
+        port,
+        uri,
+    )
     partkey = "/0"
     uri_del_prefix = uri + _uri_suffix_ipv4_routes + _uri_suffix_ipv4_route
     prefix_gap = 2 ** (32 - prefix_len)
     for prefix_index in range(count):
         prefix = prefix_base + prefix_index * prefix_gap
-        logger.info("Deleting prefix %s/%s/%s from %s:%s/restconf/%s",
-                    prefix, prefix_len, partkey, odl_ip, port, uri)
-        send_request("DELETE", odl_ip, port,
-                     uri_del_prefix + str(prefix) + "%2F" + str(prefix_len) + partkey, auth)
-
-
-def delete_all_prefixes(odl_ip, port, uri, auth, prefix_base=None,
-                        prefix_len=None, count=None, xml_template=None):
+        logger.info(
+            "Deleting prefix %s/%s/%s from %s:%s/restconf/%s",
+            prefix,
+            prefix_len,
+            partkey,
+            odl_ip,
+            port,
+            uri,
+        )
+        send_request(
+            "DELETE",
+            odl_ip,
+            port,
+            uri_del_prefix + str(prefix) + "%2F" + str(prefix_len) + partkey,
+            auth,
+        )
+
+
+def delete_all_prefixes(
+    odl_ip,
+    port,
+    uri,
+    auth,
+    prefix_base=None,
+    prefix_len=None,
+    count=None,
+    xml_template=None,
+):
     """Send a http DELETE request for deleting all prefixes.
 
     Args:
@@ -370,45 +474,85 @@ def delete_all_prefixes(odl_ip, port, uri, auth, prefix_base=None,
 
 _commands = ["post", "put", "add", "delete", "delete-all", "get"]
 _uri_suffix_ipv4_routes = "bgp-inet:ipv4-routes/"
-_uri_suffix_ipv4_route = "bgp-inet:ipv4-route/"   # followed by IP address like 1.1.1.1%2F32
+_uri_suffix_ipv4_route = (
+    "bgp-inet:ipv4-route/"  # followed by IP address like 1.1.1.1%2F32
+)
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="BGP application peer script")
-    parser.add_argument("--host", type=ipaddr.IPv4Address, default="127.0.0.1",
-                        help="ODL controller IP address")
-    parser.add_argument("--port", default="8181",
-                        help="ODL RESTCONF port")
-    parser.add_argument("--command", choices=_commands, metavar="command",
-                        help="Command to be performed."
-                        "post, put, add, delete, delete-all, get")
-    parser.add_argument("--prefix", type=ipaddr.IPv4Address, default="8.0.1.0",
-                        help="First prefix IP address")
-    parser.add_argument("--prefixlen", type=int, help="Prefix length in bites",
-                        default=28)
-    parser.add_argument("--count", type=int, help="Number of prefixes",
-                        default=1)
+    parser.add_argument(
+        "--host",
+        type=ipaddr.IPv4Address,
+        default="127.0.0.1",
+        help="ODL controller IP address",
+    )
+    parser.add_argument("--port", default="8181", help="ODL RESTCONF port")
+    parser.add_argument(
+        "--command",
+        choices=_commands,
+        metavar="command",
+        help="Command to be performed." "post, put, add, delete, delete-all, get",
+    )
+    parser.add_argument(
+        "--prefix",
+        type=ipaddr.IPv4Address,
+        default="8.0.1.0",
+        help="First prefix IP address",
+    )
+    parser.add_argument(
+        "--prefixlen", type=int, help="Prefix length in bites", default=28
+    )
+    parser.add_argument("--count", type=int, help="Number of prefixes", default=1)
     parser.add_argument("--user", help="Restconf user name", default="admin")
     parser.add_argument("--password", help="Restconf password", default="admin")
-    parser.add_argument("--uri", help="The uri part of requests",
-                        default="config/bgp-rib:application-rib/example-app-rib/"
-                                "tables/bgp-types:ipv4-address-family/"
-                                "bgp-types:unicast-subsequent-address-family/")
-    parser.add_argument("--xml", help="File name of the xml data template",
-                        default="ipv4-routes-template.xml")
-    parser.add_argument("--error", dest="loglevel", action="store_const",
-                        const=logging.ERROR, default=logging.INFO,
-                        help="Set log level to error (default is info)")
-    parser.add_argument("--warning", dest="loglevel", action="store_const",
-                        const=logging.WARNING, default=logging.INFO,
-                        help="Set log level to warning (default is info)")
-    parser.add_argument("--info", dest="loglevel", action="store_const",
-                        const=logging.INFO, default=logging.INFO,
-                        help="Set log level to info (default is info)")
-    parser.add_argument("--debug", dest="loglevel", action="store_const",
-                        const=logging.DEBUG, default=logging.INFO,
-                        help="Set log level to debug (default is info)")
+    parser.add_argument(
+        "--uri",
+        help="The uri part of requests",
+        default="config/bgp-rib:application-rib/example-app-rib/"
+        "tables/bgp-types:ipv4-address-family/"
+        "bgp-types:unicast-subsequent-address-family/",
+    )
+    parser.add_argument(
+        "--xml",
+        help="File name of the xml data template",
+        default="ipv4-routes-template.xml",
+    )
+    parser.add_argument(
+        "--error",
+        dest="loglevel",
+        action="store_const",
+        const=logging.ERROR,
+        default=logging.INFO,
+        help="Set log level to error (default is info)",
+    )
+    parser.add_argument(
+        "--warning",
+        dest="loglevel",
+        action="store_const",
+        const=logging.WARNING,
+        default=logging.INFO,
+        help="Set log level to warning (default is info)",
+    )
+    parser.add_argument(
+        "--info",
+        dest="loglevel",
+        action="store_const",
+        const=logging.INFO,
+        default=logging.INFO,
+        help="Set log level to info (default is info)",
+    )
+    parser.add_argument(
+        "--debug",
+        dest="loglevel",
+        action="store_const",
+        const=logging.DEBUG,
+        default=logging.INFO,
+        help="Set log level to debug (default is info)",
+    )
     parser.add_argument("--logfile", default="bgp_app_peer.log", help="Log file name")
-    parser.add_argument("--stream", default="", help="ODL Stream - oxygen, fluorine ...")
+    parser.add_argument(
+        "--stream", default="", help="ODL Stream - oxygen, fluorine ..."
+    )
 
     args = parser.parse_args()
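
Putting the options above together, a typical invocation might look like the
following (the script name is inferred from the bgp_app_peer.log default and is
otherwise an assumption):

    python bgp_app_peer.py --host 127.0.0.1 --port 8181 --command post \
        --prefix 8.0.1.0 --prefixlen 28 --count 100 --user admin --password admin
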
 
@@ -444,14 +588,41 @@ if __name__ == "__main__":
     total_number_of_responses_counter = 0
 
     if command == "post":
-        post_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                      route_key, xml_template)
+        post_prefixes(
+            odl_ip,
+            port,
+            uri,
+            auth,
+            prefix_base,
+            prefix_len,
+            count,
+            route_key,
+            xml_template,
+        )
     if command == "put":
-        put_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                     route_key, xml_template)
+        put_prefixes(
+            odl_ip,
+            port,
+            uri,
+            auth,
+            prefix_base,
+            prefix_len,
+            count,
+            route_key,
+            xml_template,
+        )
     if command == "add":
-        add_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count,
-                     route_key, xml_template)
+        add_prefixes(
+            odl_ip,
+            port,
+            uri,
+            auth,
+            prefix_base,
+            prefix_len,
+            count,
+            route_key,
+            xml_template,
+        )
     elif command == "delete":
         delete_prefixes(odl_ip, port, uri, auth, prefix_base, prefix_len, count)
     elif command == "delete-all":
index b75aa2de4ea86a61d50ce7c732058becb87b8d99..72eb9f18127621dd46402d0a8159d265800ab099 100755 (executable)
@@ -33,13 +33,13 @@ __email__ = "vrpolak@cisco.com"
 
 
 class SafeDict(dict):
-    '''Thread safe dictionary
+    """Thread safe dictionary
 
     The object will serve as thread safe data storage.
     It should be used with "with" statement.
-    '''
+    """
 
-    def __init__(self, * p_arg, ** n_arg):
+    def __init__(self, *p_arg, **n_arg):
         super(SafeDict, self).__init__()
         self._lock = threading.Lock()
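
Only __init__ is visible in this hunk; the "with self.storage as s:" pattern
seen earlier in this patch implies SafeDict also provides the context-manager
hooks, roughly as below (a sketch under that assumption, not the file's
verbatim body):

    def __enter__(self):
        # Serialize access: handler and rpc threads share the storage.
        self._lock.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._lock.release()
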
 
@@ -63,7 +63,7 @@ def parse_arguments():
     parser.add_argument("--asnumber", default=64496, type=int, help=str_help)
     # FIXME: We are acting as iBGP peer,
     # we should mirror AS number from peer's open message.
-    str_help = "Amount of IP prefixes to generate. (negative means ""infinite"")."
+    str_help = "Amount of IP prefixes to generate. (negative means " "infinite" ")."
     parser.add_argument("--amount", default="1", type=int, help=str_help)
     str_help = "Rpc server port."
     parser.add_argument("--port", default="8000", type=int, help=str_help)
@@ -74,52 +74,97 @@ def parse_arguments():
     str_help = "The number of prefixes to process without withdrawals"
     parser.add_argument("--prefill", default="0", type=int, help=str_help)
     str_help = "Single or two separate UPDATEs for NLRI and WITHDRAWN lists sent"
-    parser.add_argument("--updates", choices=["single", "separate"],
-                        default=["separate"], help=str_help)
+    parser.add_argument(
+        "--updates", choices=["single", "separate"], default=["separate"], help=str_help
+    )
     str_help = "Base prefix IP address for prefix generation"
-    parser.add_argument("--firstprefix", default="8.0.1.0",
-                        type=ipaddr.IPv4Address, help=str_help)
+    parser.add_argument(
+        "--firstprefix", default="8.0.1.0", type=ipaddr.IPv4Address, help=str_help
+    )
     str_help = "The prefix length."
     parser.add_argument("--prefixlen", default=28, type=int, help=str_help)
     str_help = "Listen for connection, instead of initiating it."
     parser.add_argument("--listen", action="store_true", help=str_help)
-    str_help = ("Numeric IP Address to bind to and derive BGP ID from." +
-                "Default value only suitable for listening.")
-    parser.add_argument("--myip", default="0.0.0.0",
-                        type=ipaddr.IPv4Address, help=str_help)
-    str_help = ("TCP port to bind to when listening or initiating connection." +
-                "Default only suitable for initiating.")
+    str_help = (
+        "Numeric IP Address to bind to and derive BGP ID from."
+        + "Default value only suitable for listening."
+    )
+    parser.add_argument(
+        "--myip", default="0.0.0.0", type=ipaddr.IPv4Address, help=str_help
+    )
+    str_help = (
+        "TCP port to bind to when listening or initiating connection."
+        + "Default only suitable for initiating."
+    )
     parser.add_argument("--myport", default="0", type=int, help=str_help)
     str_help = "The IP of the next hop to be placed into the update messages."
-    parser.add_argument("--nexthop", default="192.0.2.1",
-                        type=ipaddr.IPv4Address, dest="nexthop", help=str_help)
+    parser.add_argument(
+        "--nexthop",
+        default="192.0.2.1",
+        type=ipaddr.IPv4Address,
+        dest="nexthop",
+        help=str_help,
+    )
     str_help = "Identifier of the route originator."
-    parser.add_argument("--originator", default=None,
-                        type=ipaddr.IPv4Address, dest="originator", help=str_help)
+    parser.add_argument(
+        "--originator",
+        default=None,
+        type=ipaddr.IPv4Address,
+        dest="originator",
+        help=str_help,
+    )
     str_help = "Cluster list item identifier."
-    parser.add_argument("--cluster", default=None,
-                        type=ipaddr.IPv4Address, dest="cluster", help=str_help)
-    str_help = ("Numeric IP Address to try to connect to." +
-                "Currently no effect in listening mode.")
-    parser.add_argument("--peerip", default="127.0.0.2",
-                        type=ipaddr.IPv4Address, help=str_help)
+    parser.add_argument(
+        "--cluster",
+        default=None,
+        type=ipaddr.IPv4Address,
+        dest="cluster",
+        help=str_help,
+    )
+    str_help = (
+        "Numeric IP Address to try to connect to."
+        + "Currently no effect in listening mode."
+    )
+    parser.add_argument(
+        "--peerip", default="127.0.0.2", type=ipaddr.IPv4Address, help=str_help
+    )
     str_help = "TCP port to try to connect to. No effect in listening mode."
     parser.add_argument("--peerport", default="179", type=int, help=str_help)
     str_help = "Local hold time."
     parser.add_argument("--holdtime", default="180", type=int, help=str_help)
     str_help = "Log level (--error, --warning, --info, --debug)"
-    parser.add_argument("--error", dest="loglevel", action="store_const",
-                        const=logging.ERROR, default=logging.INFO,
-                        help=str_help)
-    parser.add_argument("--warning", dest="loglevel", action="store_const",
-                        const=logging.WARNING, default=logging.INFO,
-                        help=str_help)
-    parser.add_argument("--info", dest="loglevel", action="store_const",
-                        const=logging.INFO, default=logging.INFO,
-                        help=str_help)
-    parser.add_argument("--debug", dest="loglevel", action="store_const",
-                        const=logging.DEBUG, default=logging.INFO,
-                        help=str_help)
+    parser.add_argument(
+        "--error",
+        dest="loglevel",
+        action="store_const",
+        const=logging.ERROR,
+        default=logging.INFO,
+        help=str_help,
+    )
+    parser.add_argument(
+        "--warning",
+        dest="loglevel",
+        action="store_const",
+        const=logging.WARNING,
+        default=logging.INFO,
+        help=str_help,
+    )
+    parser.add_argument(
+        "--info",
+        dest="loglevel",
+        action="store_const",
+        const=logging.INFO,
+        default=logging.INFO,
+        help=str_help,
+    )
+    parser.add_argument(
+        "--debug",
+        dest="loglevel",
+        action="store_const",
+        const=logging.DEBUG,
+        default=logging.INFO,
+        help=str_help,
+    )
     str_help = "Log file name"
     parser.add_argument("--logfile", default="bgp_peer.log", help=str_help)
     str_help = "Trailing part of the csv result files for plotting purposes"
@@ -129,7 +174,9 @@ def parse_arguments():
     str_help = "RFC 4760 Multiprotocol Extensions for BGP-4 supported"
     parser.add_argument("--rfc4760", default=True, type=bool, help=str_help)
     str_help = "Using peerip instead of myip for xmlrpc server"
-    parser.add_argument("--usepeerip", default=False, action="store_true", help=str_help)
+    parser.add_argument(
+        "--usepeerip", default=False, action="store_true", help=str_help
+    )
     str_help = "Link-State NLRI supported"
     parser.add_argument("--bgpls", default=False, type=bool, help=str_help)
     str_help = "Link-State NLRI: Identifier"
@@ -139,11 +186,13 @@ def parse_arguments():
     str_help = "Link-State NLRI: LSP ID"
     parser.add_argument("-lspid", default="1", type=int, help=str_help)
     str_help = "Link-State NLRI: IPv4 Tunnel Sender Address"
-    parser.add_argument("--lstsaddr", default="1.2.3.4",
-                        type=ipaddr.IPv4Address, help=str_help)
+    parser.add_argument(
+        "--lstsaddr", default="1.2.3.4", type=ipaddr.IPv4Address, help=str_help
+    )
     str_help = "Link-State NLRI: IPv4 Tunnel End Point Address"
-    parser.add_argument("--lsteaddr", default="5.6.7.8",
-                        type=ipaddr.IPv4Address, help=str_help)
+    parser.add_argument(
+        "--lsteaddr", default="5.6.7.8", type=ipaddr.IPv4Address, help=str_help
+    )
     str_help = "Link-State NLRI: Identifier Step"
     parser.add_argument("-lsidstep", default="1", type=int, help=str_help)
     str_help = "Link-State NLRI: Tunnel ID Step"
@@ -172,11 +221,17 @@ def parse_arguments():
     str_help = "Open message includes L3VPN-MULTICAST arguments.\
     Enabling this flag makes the script skip decoding of the update message,\
     because decoding of these elements is not supported."
-    parser.add_argument("--l3vpn_mcast", default=False, action="store_true", help=str_help)
-    str_help = "Open message includes L3VPN-UNICAST arguments, without message decoding."
+    parser.add_argument(
+        "--l3vpn_mcast", default=False, action="store_true", help=str_help
+    )
+    str_help = (
+        "Open message includes L3VPN-UNICAST arguments, without message decoding."
+    )
     parser.add_argument("--l3vpn", default=False, action="store_true", help=str_help)
     str_help = "Open message includes ROUTE-TARGET-CONSTRAIN arguments, without message decoding."
-    parser.add_argument("--rt_constrain", default=False, action="store_true", help=str_help)
+    parser.add_argument(
+        "--rt_constrain", default=False, action="store_true", help=str_help
+    )
     str_help = "Open message includes ipv6-unicast family, without message decoding."
     parser.add_argument("--ipv6", default=False, action="store_true", help=str_help)
     str_help = "Add all supported families without message decoding."
@@ -264,7 +319,7 @@ def get_prefix_list_from_hex(prefixes_hex):
         prefix_bit_len_hex = prefixes_hex[offset]
         prefix_bit_len = int(binascii.b2a_hex(prefix_bit_len_hex), 16)
         prefix_len = ((prefix_bit_len - 1) / 8) + 1
-        prefix_hex = prefixes_hex[offset + 1: offset + 1 + prefix_len]
+        prefix_hex = prefixes_hex[offset + 1 : offset + 1 + prefix_len]
         prefix = ".".join(str(i) for i in struct.unpack("BBBB", prefix_hex))
         offset += 1 + prefix_len
         prefix_list.append(prefix + "/" + str(prefix_bit_len))
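
A worked decode for the loop above, taking 8.0.1.16/28 as the prefix (note the
Python 2 integer division):

    # prefix_bit_len = 0x1c = 28
    # prefix_len = ((28 - 1) / 8) + 1 = 4 octets on the wire
    # prefix_hex = "\x08\x00\x01\x10" -> struct.unpack("BBBB", ...) -> (8, 0, 1, 16)
    # appended entry: "8.0.1.16/28"; offset advances by 1 + 4 = 5
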
@@ -324,8 +379,11 @@ def read_open_message(bgp_socket):
     reported_length = get_short_int_from_message(msg_in)
     if len(msg_in) != reported_length:
         error_msg = (
-            "Expected message length (" + reported_length +
-            ") does not match actual length (" + str(len(msg_in)) + ")"
+            "Expected message length ("
+            + str(reported_length)
+            + ") does not match actual length ("
+            + str(len(msg_in))
+            + ")"
         )
         logger.error(error_msg + binascii.hexlify(msg_in))
         raise MessageError(error_msg, msg_in)
@@ -393,34 +451,37 @@ class MessageGenerator(object):
         if self.bgpls:
             self.prefix_count_to_add_default = 1
             self.prefix_count_to_del_default = 0
-        self.ls_nlri_default = {"Identifier": args.lsid,
-                                "TunnelID": args.lstid,
-                                "LSPID": args.lspid,
-                                "IPv4TunnelSenderAddress": args.lstsaddr,
-                                "IPv4TunnelEndPointAddress": args.lsteaddr}
+        self.ls_nlri_default = {
+            "Identifier": args.lsid,
+            "TunnelID": args.lstid,
+            "LSPID": args.lspid,
+            "IPv4TunnelSenderAddress": args.lstsaddr,
+            "IPv4TunnelEndPointAddress": args.lsteaddr,
+        }
         self.lsid_step = args.lsidstep
         self.lstid_step = args.lstidstep
         self.lspid_step = args.lspidstep
         self.lstsaddr_step = args.lstsaddrstep
         self.lsteaddr_step = args.lsteaddrstep
         # Default values used for randomized part
-        s1_slots = ((self.total_prefix_amount -
-                     self.remaining_prefixes_threshold - 1) /
-                    self.prefix_count_to_add_default + 1)
-        s2_slots = (
-            (self.remaining_prefixes_threshold - 1)
-            / (self.prefix_count_to_add_default - self.prefix_count_to_del_default)
-            + 1
-        )
+        s1_slots = (
+            self.total_prefix_amount - self.remaining_prefixes_threshold - 1
+        ) / self.prefix_count_to_add_default + 1
+        s2_slots = (self.remaining_prefixes_threshold - 1) / (
+            self.prefix_count_to_add_default - self.prefix_count_to_del_default
+        ) + 1
         # S1_First_Index = 0
         # S1_Last_Index = s1_slots * self.prefix_count_to_add_default - 1
         s2_first_index = s1_slots * self.prefix_count_to_add_default
-        s2_last_index = (s2_first_index +
-                         s2_slots * (self.prefix_count_to_add_default -
-                                     self.prefix_count_to_del_default) - 1)
-        self.slot_gap_default = ((self.total_prefix_amount -
-                                  self.remaining_prefixes_threshold - 1) /
-                                 self.prefix_count_to_add_default + 1)
+        s2_last_index = (
+            s2_first_index
+            + s2_slots
+            * (self.prefix_count_to_add_default - self.prefix_count_to_del_default)
+            - 1
+        )
+        self.slot_gap_default = (
+            self.total_prefix_amount - self.remaining_prefixes_threshold - 1
+        ) / self.prefix_count_to_add_default + 1
         self.randomize_lowest_default = s2_first_index
         self.randomize_highest_default = s2_last_index
         # Initialising counters
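
The s1_slots/s2_slots expressions above lean on Python 2 integer division; with
small illustrative numbers (10 prefixes total, threshold 4, +2/-1 prefixes per
UPDATE):

    # s1_slots = (10 - 4 - 1) / 2 + 1 = 3   -> pre-fill phase, 2 fresh prefixes per slot
    # s2_slots = (4 - 1) / (2 - 1) + 1 = 4  -> mixed phase, net +1 prefix per slot
    # s2_first_index = 3 * 2 = 6; s2_last_index = 6 + 4 * 1 - 1 = 9
    # so slots 0-5 pre-fill quickly and indexes 6-9 feed the randomized phase
    # (under Python 3 these expressions would need // to stay integral)
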
@@ -445,39 +506,62 @@ class MessageGenerator(object):
         """
 
         logger.info("Generator initialisation")
-        logger.info("  Target total number of prefixes to be introduced: " +
-                    str(self.total_prefix_amount))
-        logger.info("  Prefix base: " + str(self.prefix_base_default) + "/" +
-                    str(self.prefix_length_default))
-        logger.info("  My Autonomous System number: " +
-                    str(self.my_autonomous_system_default))
+        logger.info(
+            "  Target total number of prefixes to be introduced: "
+            + str(self.total_prefix_amount)
+        )
+        logger.info(
+            "  Prefix base: "
+            + str(self.prefix_base_default)
+            + "/"
+            + str(self.prefix_length_default)
+        )
+        logger.info(
+            "  My Autonomous System number: " + str(self.my_autonomous_system_default)
+        )
         logger.info("  My Hold Time: " + str(self.hold_time_default))
         logger.info("  My BGP Identifier: " + str(self.bgp_identifier_default))
         logger.info("  Next Hop: " + str(self.next_hop_default))
         logger.info("  Originator ID: " + str(self.originator_id_default))
         logger.info("  Cluster list: " + str(self.cluster_list_item_default))
-        logger.info("  Prefix count to be inserted at once: " +
-                    str(self.prefix_count_to_add_default))
-        logger.info("  Prefix count to be withdrawn at once: " +
-                    str(self.prefix_count_to_del_default))
-        logger.info("  Fast pre-fill up to " +
-                    str(self.total_prefix_amount -
-                        self.remaining_prefixes_threshold) + " prefixes")
-        logger.info("  Remaining number of prefixes to be processed " +
-                    "in parallel with withdrawals: " +
-                    str(self.remaining_prefixes_threshold))
-        logger.debug("  Prefix index range used after pre-fill procedure [" +
-                     str(self.randomize_lowest_default) + ", " +
-                     str(self.randomize_highest_default) + "]")
+        logger.info(
+            "  Prefix count to be inserted at once: "
+            + str(self.prefix_count_to_add_default)
+        )
+        logger.info(
+            "  Prefix count to be withdrawn at once: "
+            + str(self.prefix_count_to_del_default)
+        )
+        logger.info(
+            "  Fast pre-fill up to "
+            + str(self.total_prefix_amount - self.remaining_prefixes_threshold)
+            + " prefixes"
+        )
+        logger.info(
+            "  Remaining number of prefixes to be processed "
+            + "in parallel with withdrawals: "
+            + str(self.remaining_prefixes_threshold)
+        )
+        logger.debug(
+            "  Prefix index range used after pre-fill procedure ["
+            + str(self.randomize_lowest_default)
+            + ", "
+            + str(self.randomize_highest_default)
+            + "]"
+        )
         if self.single_update_default:
-            logger.info("  Common single UPDATE will be generated " +
-                        "for both NLRI & WITHDRAWN lists")
+            logger.info(
+                "  Common single UPDATE will be generated "
+                + "for both NLRI & WITHDRAWN lists"
+            )
         else:
-            logger.info("  Two separate UPDATEs will be generated " +
-                        "for each NLRI & WITHDRAWN lists")
+            logger.info(
+                "  Two separate UPDATEs will be generated "
+                + "for each NLRI & WITHDRAWN lists"
+            )
         if self.randomize_updates_default:
             logger.info("  Generation of UPDATE messages will be randomized")
-        logger.info("  Let\'s go ...\n")
+        logger.info("  Let's go ...\n")
 
         # TODO: Notification for hold timer expiration can be handy.
 
@@ -501,42 +585,63 @@ class MessageGenerator(object):
         # performance calculation
         if self.phase1_updates_sent >= threshold:
             totals1 = self.phase1_updates_sent
-            performance1 = int(self.phase1_updates_sent /
-                               (self.phase1_stop_time - self.phase1_start_time))
+            performance1 = int(
+                self.phase1_updates_sent
+                / (self.phase1_stop_time - self.phase1_start_time)
+            )
         else:
             totals1 = None
             performance1 = None
         if self.phase2_updates_sent >= threshold:
             totals2 = self.phase2_updates_sent
-            performance2 = int(self.phase2_updates_sent /
-                               (self.phase2_stop_time - self.phase2_start_time))
+            performance2 = int(
+                self.phase2_updates_sent
+                / (self.phase2_stop_time - self.phase2_start_time)
+            )
         else:
             totals2 = None
             performance2 = None
 
         logger.info("#" * 10 + " Final results " + "#" * 10)
         logger.info("Number of iterations: " + str(self.iteration))
-        logger.info("Number of UPDATE messages sent in the pre-fill phase: " +
-                    str(self.phase1_updates_sent))
-        logger.info("The pre-fill phase duration: " +
-                    str(self.phase1_stop_time - self.phase1_start_time) + "s")
-        logger.info("Number of UPDATE messages sent in the 2nd test phase: " +
-                    str(self.phase2_updates_sent))
-        logger.info("The 2nd test phase duration: " +
-                    str(self.phase2_stop_time - self.phase2_start_time) + "s")
+        logger.info(
+            "Number of UPDATE messages sent in the pre-fill phase: "
+            + str(self.phase1_updates_sent)
+        )
+        logger.info(
+            "The pre-fill phase duration: "
+            + str(self.phase1_stop_time - self.phase1_start_time)
+            + "s"
+        )
+        logger.info(
+            "Number of UPDATE messages sent in the 2nd test phase: "
+            + str(self.phase2_updates_sent)
+        )
+        logger.info(
+            "The 2nd test phase duration: "
+            + str(self.phase2_stop_time - self.phase2_start_time)
+            + "s"
+        )
         logger.info("Threshold for performance reporting: " + str(threshold))
 
         # making labels
-        phase1_label = ("pre-fill " + str(self.prefix_count_to_add_default) +
-                        " route(s) per UPDATE")
+        phase1_label = (
+            "pre-fill " + str(self.prefix_count_to_add_default) + " route(s) per UPDATE"
+        )
         if self.single_update_default:
-            phase2_label = "+" + (str(self.prefix_count_to_add_default) +
-                                  "/-" + str(self.prefix_count_to_del_default) +
-                                  " routes per UPDATE")
+            phase2_label = "+" + (
+                str(self.prefix_count_to_add_default)
+                + "/-"
+                + str(self.prefix_count_to_del_default)
+                + " routes per UPDATE"
+            )
         else:
-            phase2_label = "+" + (str(self.prefix_count_to_add_default) +
-                                  "/-" + str(self.prefix_count_to_del_default) +
-                                  " routes in two UPDATEs")
+            phase2_label = "+" + (
+                str(self.prefix_count_to_add_default)
+                + "/-"
+                + str(self.prefix_count_to_del_default)
+                + " routes in two UPDATEs"
+            )
         # collecting capacity and performance results
         totals = {}
         performance = {}
@@ -568,8 +673,9 @@ class MessageGenerator(object):
             second_line = second_line[:-2]
             f.write(first_line + "\n")
             f.write(second_line + "\n")
-            logger.info("Message generator performance results stored in " +
-                        file_name + ":")
+            logger.info(
+                "Message generator performance results stored in " + file_name + ":"
+            )
             logger.info("  " + first_line)
             logger.info("  " + second_line)
         finally:
@@ -615,18 +721,33 @@ class MessageGenerator(object):
         """
         # generating list of LS NLRI parameters
         identifier = self.ls_nlri_default["Identifier"] + index / self.lsid_step
-        ipv4_tunnel_sender_address = self.ls_nlri_default["IPv4TunnelSenderAddress"] + index / self.lstsaddr_step
+        ipv4_tunnel_sender_address = (
+            self.ls_nlri_default["IPv4TunnelSenderAddress"] + index / self.lstsaddr_step
+        )
         tunnel_id = self.ls_nlri_default["TunnelID"] + index / self.lstid_step
         lsp_id = self.ls_nlri_default["LSPID"] + index / self.lspid_step
-        ipv4_tunnel_endpoint_address = self.ls_nlri_default["IPv4TunnelEndPointAddress"] + index / self.lsteaddr_step
-        ls_nlri_values = {"Identifier": identifier,
-                          "IPv4TunnelSenderAddress": ipv4_tunnel_sender_address,
-                          "TunnelID": tunnel_id, "LSPID": lsp_id,
-                          "IPv4TunnelEndPointAddress": ipv4_tunnel_endpoint_address}
+        ipv4_tunnel_endpoint_address = (
+            self.ls_nlri_default["IPv4TunnelEndPointAddress"]
+            + index / self.lsteaddr_step
+        )
+        ls_nlri_values = {
+            "Identifier": identifier,
+            "IPv4TunnelSenderAddress": ipv4_tunnel_sender_address,
+            "TunnelID": tunnel_id,
+            "LSPID": lsp_id,
+            "IPv4TunnelEndPointAddress": ipv4_tunnel_endpoint_address,
+        }
         return ls_nlri_values
 
-    def get_prefix_list(self, slot_index, slot_size=None, prefix_base=None,
-                        prefix_len=None, prefix_count=None, randomize=None):
+    def get_prefix_list(
+        self,
+        slot_index,
+        slot_size=None,
+        prefix_base=None,
+        prefix_len=None,
+        prefix_count=None,
+        randomize=None,
+    ):
         """Generates list of IP address prefixes.
 
         Arguments:
@@ -672,8 +793,9 @@ class MessageGenerator(object):
             logger.debug("  Prefix list: " + str(prefixes))
         return prefixes
 
-    def compose_update_message(self, prefix_count_to_add=None,
-                               prefix_count_to_del=None):
+    def compose_update_message(
+        self, prefix_count_to_add=None, prefix_count_to_del=None
+    ):
         """Composes an UPDATE message
 
         Arguments:
@@ -694,17 +816,21 @@ class MessageGenerator(object):
             prefix_count_to_del = self.prefix_count_to_del_default
         # logging
         if self.log_info and not (self.iteration % 1000):
-            logger.info("Iteration: " + str(self.iteration) +
-                        " - total remaining prefixes: " +
-                        str(self.remaining_prefixes))
+            logger.info(
+                "Iteration: "
+                + str(self.iteration)
+                + " - total remaining prefixes: "
+                + str(self.remaining_prefixes)
+            )
         if self.log_debug:
-            logger.debug("#" * 10 + " Iteration: " +
-                         str(self.iteration) + " " + "#" * 10)
-            logger.debug("Remaining prefixes: " +
-                         str(self.remaining_prefixes))
+            logger.debug(
+                "#" * 10 + " Iteration: " + str(self.iteration) + " " + "#" * 10
+            )
+            logger.debug("Remaining prefixes: " + str(self.remaining_prefixes))
         # scenario type & one-shot counter
-        straightforward_scenario = (self.remaining_prefixes >
-                                    self.remaining_prefixes_threshold)
+        straightforward_scenario = (
+            self.remaining_prefixes > self.remaining_prefixes_threshold
+        )
         if straightforward_scenario:
             prefix_count_to_del = 0
             if self.log_debug:
@@ -717,9 +843,8 @@ class MessageGenerator(object):
             if not self.phase2_start_time:
                 self.phase2_start_time = time.time()
         # tailor the number of prefixes if needed
-        prefix_count_to_add = (
-            prefix_count_to_del
-            + min(prefix_count_to_add - prefix_count_to_del, self.remaining_prefixes)
+        prefix_count_to_add = prefix_count_to_del + min(
+            prefix_count_to_add - prefix_count_to_del, self.remaining_prefixes
         )
         # prefix slots selection for insertion and withdrawal
         slot_index_to_add = self.iteration
@@ -727,51 +852,59 @@ class MessageGenerator(object):
         # getting lists of prefixes for insertion in this iteration
         if self.log_debug:
             logger.debug("Prefixes to be inserted in this iteration:")
-        prefix_list_to_add = self.get_prefix_list(slot_index_to_add,
-                                                  prefix_count=prefix_count_to_add)
+        prefix_list_to_add = self.get_prefix_list(
+            slot_index_to_add, prefix_count=prefix_count_to_add
+        )
         # getting lists of prefixes for withdrawal in this iteration
         if self.log_debug:
             logger.debug("Prefixes to be withdrawn in this iteration:")
-        prefix_list_to_del = self.get_prefix_list(slot_index_to_del,
-                                                  prefix_count=prefix_count_to_del)
+        prefix_list_to_del = self.get_prefix_list(
+            slot_index_to_del, prefix_count=prefix_count_to_del
+        )
         # generating the UPDATE message with LS-NLRI only
         if self.bgpls:
             ls_nlri = self.get_ls_nlri_values(self.iteration)
-            msg_out = self.update_message(wr_prefixes=[], nlri_prefixes=[],
-                                          **ls_nlri)
+            msg_out = self.update_message(wr_prefixes=[], nlri_prefixes=[], **ls_nlri)
         else:
             # generating the UPDATE message with prefix lists
             if self.single_update_default:
                 # Send prefixes to be introduced and withdrawn
                 # in one UPDATE message
-                msg_out = self.update_message(wr_prefixes=prefix_list_to_del,
-                                              nlri_prefixes=prefix_list_to_add)
+                msg_out = self.update_message(
+                    wr_prefixes=prefix_list_to_del, nlri_prefixes=prefix_list_to_add
+                )
             else:
                 # Send prefixes to be introduced and withdrawn
                 # in separate UPDATE messages (if needed)
-                msg_out = self.update_message(wr_prefixes=[],
-                                              nlri_prefixes=prefix_list_to_add)
+                msg_out = self.update_message(
+                    wr_prefixes=[], nlri_prefixes=prefix_list_to_add
+                )
                 if prefix_count_to_del:
-                    msg_out += self.update_message(wr_prefixes=prefix_list_to_del,
-                                                   nlri_prefixes=[])
+                    msg_out += self.update_message(
+                        wr_prefixes=prefix_list_to_del, nlri_prefixes=[]
+                    )
         # updating counters - who knows ... maybe I am here for the last time ;)
         if straightforward_scenario:
             self.phase1_stop_time = time.time()
             self.phase1_updates_sent = self.updates_sent
         else:
             self.phase2_stop_time = time.time()
-            self.phase2_updates_sent = (self.updates_sent -
-                                        self.phase1_updates_sent)
+            self.phase2_updates_sent = self.updates_sent - self.phase1_updates_sent
         # updating totals for the next iteration
         self.iteration += 1
-        self.remaining_prefixes -= (prefix_count_to_add - prefix_count_to_del)
+        self.remaining_prefixes -= prefix_count_to_add - prefix_count_to_del
         # returning the encoded message
         return msg_out
 
     # Section of message encoders
 
-    def open_message(self, version=None, my_autonomous_system=None,
-                     hold_time=None, bgp_identifier=None):
+    def open_message(
+        self,
+        version=None,
+        my_autonomous_system=None,
+        hold_time=None,
+        bgp_identifier=None,
+    ):
         """Generates an OPEN Message (rfc4271#section-4.2)
 
         Arguments:
@@ -810,8 +943,7 @@ class MessageGenerator(object):
         # AS number is mappable to 2 bytes
         if my_autonomous_system < 65536:
             my_autonomous_system_2_bytes = my_autonomous_system
-        my_autonomous_system_hex_2_bytes = struct.pack(">H",
-                                                       my_autonomous_system)
+        my_autonomous_system_hex_2_bytes = struct.pack(">H", my_autonomous_system)
 
         # Hold Time
         hold_time_hex = struct.pack(">H", hold_time)
@@ -826,7 +958,7 @@ class MessageGenerator(object):
                 "\x02"  # Param type ("Capability Ad")
                 "\x06"  # Length (6 bytes)
                 "\x01"  # Capability type (NLRI Unicast),
-                        # see RFC 4760, secton 8
+                # see RFC 4760, section 8
                 "\x04"  # Capability value length
                 "\x00\x01"  # AFI (Ipv4)
                 "\x00"  # (reserved)
@@ -851,7 +983,7 @@ class MessageGenerator(object):
                 "\x02"  # Param type ("Capability Ad")
                 "\x06"  # Length (6 bytes)
                 "\x01"  # Capability type (NLRI Unicast),
-                        # see RFC 4760, secton 8
+                # see RFC 4760, section 8
                 "\x04"  # Capability value length
                 "\x40\x04"  # AFI (BGP-LS)
                 "\x00"  # (reserved)
@@ -953,27 +1085,27 @@ class MessageGenerator(object):
             "\x02"  # Param type ("Capability Ad")
             "\x06"  # Length (6 bytes)
             "\x41"  # "32 bit AS Numbers Support"
-                    # (see RFC 6793, section 3)
+            # (see RFC 6793, section 3)
             "\x04"  # Capability value length
         )
-        optional_parameter_hex += (
-            struct.pack(">I", my_autonomous_system)  # My AS in 32 bit format
-        )
+        optional_parameter_hex += struct.pack(
+            ">I", my_autonomous_system
+        )  # My AS in 32 bit format
         optional_parameters_hex += optional_parameter_hex
 
         if self.grace != 8:
             b = list(bin(self.grace)[2:])
             b = b + [0] * (3 - len(b))
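+            # The three bits of self.grace drive the capability bytes below:
+            # b[0] enables ll_gr, b[1] selects restart_flag, b[2] selects ipv4_flag.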
             length = "\x08"
-            if b[1] == '1':
+            if b[1] == "1":
                 restart_flag = "\x80\x05"
             else:
                 restart_flag = "\x00\x05"
-            if b[2] == '1':
+            if b[2] == "1":
                 ipv4_flag = "\x80"
             else:
                 ipv4_flag = "\x00"
-            if b[0] == '1':
+            if b[0] == "1":
                 ll_gr = "\x47\x07\x00\x01\x01\x00\x00\x00\x1e"
                 length = "\x11"
             else:
@@ -991,70 +1123,105 @@ class MessageGenerator(object):
             # "\x47\x07\x00\x01\x01\x00\x00\x00\x1e" ipv4 ll-graceful-restart capability, timer 30sec
             # ll-gr turned on when grace is between 4-7
             optional_parameter_hex = "\x02{}\x40\x06{}\x00\x01\x01{}{}".format(
-                length, restart_flag, ipv4_flag, ll_gr)
+                length, restart_flag, ipv4_flag, ll_gr
+            )
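+            # Worked example (illustrative): grace=5 gives b=['1','0','1'], hence
+            # restart_flag="\x00\x05", ipv4_flag="\x80", ll-gr bytes appended, length="\x11".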
             optional_parameters_hex += optional_parameter_hex
 
         # Optional Parameters Length
         optional_parameters_length = len(optional_parameters_hex)
-        optional_parameters_length_hex = struct.pack("B",
-                                                     optional_parameters_length)
+        optional_parameters_length_hex = struct.pack("B", optional_parameters_length)
 
         # Length (big-endian)
         length = (
-            len(marker_hex) + 2 + len(type_hex) + len(version_hex) +
-            len(my_autonomous_system_hex_2_bytes) +
-            len(hold_time_hex) + len(bgp_identifier_hex) +
-            len(optional_parameters_length_hex) +
-            len(optional_parameters_hex)
+            len(marker_hex)
+            + 2
+            + len(type_hex)
+            + len(version_hex)
+            + len(my_autonomous_system_hex_2_bytes)
+            + len(hold_time_hex)
+            + len(bgp_identifier_hex)
+            + len(optional_parameters_length_hex)
+            + len(optional_parameters_hex)
         )
         length_hex = struct.pack(">H", length)
 
         # OPEN Message
         message_hex = (
-            marker_hex +
-            length_hex +
-            type_hex +
-            version_hex +
-            my_autonomous_system_hex_2_bytes +
-            hold_time_hex +
-            bgp_identifier_hex +
-            optional_parameters_length_hex +
-            optional_parameters_hex
+            marker_hex
+            + length_hex
+            + type_hex
+            + version_hex
+            + my_autonomous_system_hex_2_bytes
+            + hold_time_hex
+            + bgp_identifier_hex
+            + optional_parameters_length_hex
+            + optional_parameters_hex
         )
 
         if self.log_debug:
             logger.debug("OPEN message encoding")
             logger.debug("  Marker=0x" + binascii.hexlify(marker_hex))
-            logger.debug("  Length=" + str(length) + " (0x" +
-                         binascii.hexlify(length_hex) + ")")
-            logger.debug("  Type=" + str(type) + " (0x" +
-                         binascii.hexlify(type_hex) + ")")
-            logger.debug("  Version=" + str(version) + " (0x" +
-                         binascii.hexlify(version_hex) + ")")
-            logger.debug("  My Autonomous System=" +
-                         str(my_autonomous_system_2_bytes) + " (0x" +
-                         binascii.hexlify(my_autonomous_system_hex_2_bytes) +
-                         ")")
-            logger.debug("  Hold Time=" + str(hold_time) + " (0x" +
-                         binascii.hexlify(hold_time_hex) + ")")
-            logger.debug("  BGP Identifier=" + str(bgp_identifier) +
-                         " (0x" + binascii.hexlify(bgp_identifier_hex) + ")")
-            logger.debug("  Optional Parameters Length=" +
-                         str(optional_parameters_length) + " (0x" +
-                         binascii.hexlify(optional_parameters_length_hex) +
-                         ")")
-            logger.debug("  Optional Parameters=0x" +
-                         binascii.hexlify(optional_parameters_hex))
-            logger.debug("OPEN message encoded: 0x%s",
-                         binascii.b2a_hex(message_hex))
+            logger.debug(
+                "  Length=" + str(length) + " (0x" + binascii.hexlify(length_hex) + ")"
+            )
+            logger.debug(
+                "  Type=" + str(type) + " (0x" + binascii.hexlify(type_hex) + ")"
+            )
+            logger.debug(
+                "  Version="
+                + str(version)
+                + " (0x"
+                + binascii.hexlify(version_hex)
+                + ")"
+            )
+            logger.debug(
+                "  My Autonomous System="
+                + str(my_autonomous_system_2_bytes)
+                + " (0x"
+                + binascii.hexlify(my_autonomous_system_hex_2_bytes)
+                + ")"
+            )
+            logger.debug(
+                "  Hold Time="
+                + str(hold_time)
+                + " (0x"
+                + binascii.hexlify(hold_time_hex)
+                + ")"
+            )
+            logger.debug(
+                "  BGP Identifier="
+                + str(bgp_identifier)
+                + " (0x"
+                + binascii.hexlify(bgp_identifier_hex)
+                + ")"
+            )
+            logger.debug(
+                "  Optional Parameters Length="
+                + str(optional_parameters_length)
+                + " (0x"
+                + binascii.hexlify(optional_parameters_length_hex)
+                + ")"
+            )
+            logger.debug(
+                "  Optional Parameters=0x" + binascii.hexlify(optional_parameters_hex)
+            )
+            logger.debug("OPEN message encoded: 0x%s", binascii.b2a_hex(message_hex))
 
         return message_hex
 
-    def update_message(self, wr_prefixes=None, nlri_prefixes=None,
-                       wr_prefix_length=None, nlri_prefix_length=None,
-                       my_autonomous_system=None, next_hop=None,
-                       originator_id=None, cluster_list_item=None,
-                       end_of_rib=False, **ls_nlri_params):
+    def update_message(
+        self,
+        wr_prefixes=None,
+        nlri_prefixes=None,
+        wr_prefix_length=None,
+        nlri_prefix_length=None,
+        my_autonomous_system=None,
+        next_hop=None,
+        originator_id=None,
+        cluster_list_item=None,
+        end_of_rib=False,
+        **ls_nlri_params
+    ):
         """Generates an UPDATE Message (rfc4271#section-4.3)
 
         Arguments:
@@ -1101,8 +1268,10 @@ class MessageGenerator(object):
         if not self.bgpls:
             bytes = ((wr_prefix_length - 1) / 8) + 1
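+            # Minimal number of octets needed to carry wr_prefix_length bits.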
             for prefix in wr_prefixes:
-                withdrawn_route_hex = (struct.pack("B", wr_prefix_length) +
-                                       struct.pack(">I", int(prefix))[:bytes])
+                withdrawn_route_hex = (
+                    struct.pack("B", wr_prefix_length)
+                    + struct.pack(">I", int(prefix))[:bytes]
+                )
                 withdrawn_routes_hex += withdrawn_route_hex
 
         # Withdrawn Routes Length
@@ -1141,22 +1310,20 @@ class MessageGenerator(object):
                 "\x04"  # Length (4)
             )
             next_hop_hex = struct.pack(">I", int(next_hop))
-            path_attributes_hex += (
-                next_hop_hex  # IP address of the next hop (4 bytes)
-            )
+            path_attributes_hex += next_hop_hex  # IP address of the next hop (4 bytes)
             if originator_id is not None:
                 path_attributes_hex += (
                     "\x80"  # Flags ("Optional, non-transitive")
                     "\x09"  # Type (ORIGINATOR_ID)
                     "\x04"  # Length (4)
-                )           # ORIGINATOR_ID (4 bytes)
+                )  # ORIGINATOR_ID (4 bytes)
                 path_attributes_hex += struct.pack(">I", int(originator_id))
             if cluster_list_item is not None:
                 path_attributes_hex += (
                     "\x80"  # Flags ("Optional, non-transitive")
                     "\x0a"  # Type (CLUSTER_LIST)
                     "\x04"  # Length (4)
-                )           # one CLUSTER_LIST item (4 bytes)
+                )  # one CLUSTER_LIST item (4 bytes)
                 path_attributes_hex += struct.pack(">I", int(cluster_list_item))
 
         if self.bgpls and not end_of_rib:
@@ -1169,73 +1336,106 @@ class MessageGenerator(object):
                 "\x04"  # Next Hop Length (4)
             )
             path_attributes_hex += struct.pack(">I", int(next_hop))
-            path_attributes_hex += "\x00"           # Reserved
+            path_attributes_hex += "\x00"  # Reserved
             path_attributes_hex += (
                 "\x00\x05"  # LS-NLRI.NLRIType (IPv4 TE LSP NLRI)
                 "\x00\x15"  # LS-NLRI.TotalNLRILength (21)
-                "\x07"      # LS-NLRI.Variable.ProtocolID (RSVP-TE)
+                "\x07"  # LS-NLRI.Variable.ProtocolID (RSVP-TE)
             )
             path_attributes_hex += struct.pack(">Q", int(ls_nlri["Identifier"]))
-            path_attributes_hex += struct.pack(">I", int(ls_nlri["IPv4TunnelSenderAddress"]))
+            path_attributes_hex += struct.pack(
+                ">I", int(ls_nlri["IPv4TunnelSenderAddress"])
+            )
             path_attributes_hex += struct.pack(">H", int(ls_nlri["TunnelID"]))
             path_attributes_hex += struct.pack(">H", int(ls_nlri["LSPID"]))
-            path_attributes_hex += struct.pack(">I", int(ls_nlri["IPv4TunnelEndPointAddress"]))
+            path_attributes_hex += struct.pack(
+                ">I", int(ls_nlri["IPv4TunnelEndPointAddress"])
+            )
 
         # Total Path Attributes Length
         total_path_attributes_length = len(path_attributes_hex)
-        total_path_attributes_length_hex = struct.pack(">H", total_path_attributes_length)
+        total_path_attributes_length_hex = struct.pack(
+            ">H", total_path_attributes_length
+        )
 
         # Network Layer Reachability Information
         nlri_hex = ""
         if not self.bgpls:
             bytes = ((nlri_prefix_length - 1) / 8) + 1
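+            # Same minimal-octet prefix encoding as for the withdrawn routes above.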
             for prefix in nlri_prefixes:
-                nlri_prefix_hex = (struct.pack("B", nlri_prefix_length) +
-                                   struct.pack(">I", int(prefix))[:bytes])
+                nlri_prefix_hex = (
+                    struct.pack("B", nlri_prefix_length)
+                    + struct.pack(">I", int(prefix))[:bytes]
+                )
                 nlri_hex += nlri_prefix_hex
 
         # Length (big-endian)
         length = (
-            len(marker_hex) + 2 + len(type_hex) +
-            len(withdrawn_routes_length_hex) + len(withdrawn_routes_hex) +
-            len(total_path_attributes_length_hex) + len(path_attributes_hex) +
-            len(nlri_hex))
+            len(marker_hex)
+            + 2
+            + len(type_hex)
+            + len(withdrawn_routes_length_hex)
+            + len(withdrawn_routes_hex)
+            + len(total_path_attributes_length_hex)
+            + len(path_attributes_hex)
+            + len(nlri_hex)
+        )
         length_hex = struct.pack(">H", length)
 
         # UPDATE Message
         message_hex = (
-            marker_hex +
-            length_hex +
-            type_hex +
-            withdrawn_routes_length_hex +
-            withdrawn_routes_hex +
-            total_path_attributes_length_hex +
-            path_attributes_hex +
-            nlri_hex
+            marker_hex
+            + length_hex
+            + type_hex
+            + withdrawn_routes_length_hex
+            + withdrawn_routes_hex
+            + total_path_attributes_length_hex
+            + path_attributes_hex
+            + nlri_hex
         )
 
         if self.grace != 8 and self.grace != 0 and end_of_rib:
-            message_hex = (marker_hex + binascii.unhexlify("00170200000000"))
+            message_hex = marker_hex + binascii.unhexlify("00170200000000")
 
         if self.log_debug:
             logger.debug("UPDATE message encoding")
             logger.debug("  Marker=0x" + binascii.hexlify(marker_hex))
-            logger.debug("  Length=" + str(length) + " (0x" +
-                         binascii.hexlify(length_hex) + ")")
-            logger.debug("  Type=" + str(type) + " (0x" +
-                         binascii.hexlify(type_hex) + ")")
-            logger.debug("  withdrawn_routes_length=" +
-                         str(withdrawn_routes_length) + " (0x" +
-                         binascii.hexlify(withdrawn_routes_length_hex) + ")")
-            logger.debug("  Withdrawn_Routes=" + str(wr_prefixes) + "/" +
-                         str(wr_prefix_length) + " (0x" +
-                         binascii.hexlify(withdrawn_routes_hex) + ")")
+            logger.debug(
+                "  Length=" + str(length) + " (0x" + binascii.hexlify(length_hex) + ")"
+            )
+            logger.debug(
+                "  Type=" + str(type) + " (0x" + binascii.hexlify(type_hex) + ")"
+            )
+            logger.debug(
+                "  withdrawn_routes_length="
+                + str(withdrawn_routes_length)
+                + " (0x"
+                + binascii.hexlify(withdrawn_routes_length_hex)
+                + ")"
+            )
+            logger.debug(
+                "  Withdrawn_Routes="
+                + str(wr_prefixes)
+                + "/"
+                + str(wr_prefix_length)
+                + " (0x"
+                + binascii.hexlify(withdrawn_routes_hex)
+                + ")"
+            )
             if total_path_attributes_length:
-                logger.debug("  Total Path Attributes Length=" +
-                             str(total_path_attributes_length) + " (0x" +
-                             binascii.hexlify(total_path_attributes_length_hex) + ")")
-                logger.debug("  Path Attributes=" + "(0x" +
-                             binascii.hexlify(path_attributes_hex) + ")")
+                logger.debug(
+                    "  Total Path Attributes Length="
+                    + str(total_path_attributes_length)
+                    + " (0x"
+                    + binascii.hexlify(total_path_attributes_length_hex)
+                    + ")"
+                )
+                logger.debug(
+                    "  Path Attributes="
+                    + "(0x"
+                    + binascii.hexlify(path_attributes_hex)
+                    + ")"
+                )
                 logger.debug("    Origin=IGP")
                 logger.debug("    AS path=" + str(my_autonomous_system))
                 logger.debug("    Next hop=" + str(next_hop))
@@ -1245,11 +1445,16 @@ class MessageGenerator(object):
                     logger.debug("    Cluster list=" + str(cluster_list_item))
                 if self.bgpls:
                     logger.debug("    MP_REACH_NLRI: %s", ls_nlri)
-            logger.debug("  Network Layer Reachability Information=" +
-                         str(nlri_prefixes) + "/" + str(nlri_prefix_length) +
-                         " (0x" + binascii.hexlify(nlri_hex) + ")")
-            logger.debug("UPDATE message encoded: 0x" +
-                         binascii.b2a_hex(message_hex))
+            logger.debug(
+                "  Network Layer Reachability Information="
+                + str(nlri_prefixes)
+                + "/"
+                + str(nlri_prefix_length)
+                + " (0x"
+                + binascii.hexlify(nlri_hex)
+                + ")"
+            )
+            logger.debug("UPDATE message encoded: 0x" + binascii.b2a_hex(message_hex))
 
         # updating counter
         self.updates_sent += 1
@@ -1281,34 +1486,53 @@ class MessageGenerator(object):
         error_subcode_hex = struct.pack("B", error_subcode)
 
         # Length (big-endian)
-        length = (len(marker_hex) + 2 + len(type_hex) + len(error_code_hex) +
-                  len(error_subcode_hex) + len(data_hex))
+        length = (
+            len(marker_hex)
+            + 2
+            + len(type_hex)
+            + len(error_code_hex)
+            + len(error_subcode_hex)
+            + len(data_hex)
+        )
         length_hex = struct.pack(">H", length)
 
         # NOTIFICATION Message
         message_hex = (
-            marker_hex +
-            length_hex +
-            type_hex +
-            error_code_hex +
-            error_subcode_hex +
-            data_hex
+            marker_hex
+            + length_hex
+            + type_hex
+            + error_code_hex
+            + error_subcode_hex
+            + data_hex
         )
 
         if self.log_debug:
             logger.debug("NOTIFICATION message encoding")
             logger.debug("  Marker=0x" + binascii.hexlify(marker_hex))
-            logger.debug("  Length=" + str(length) + " (0x" +
-                         binascii.hexlify(length_hex) + ")")
-            logger.debug("  Type=" + str(type) + " (0x" +
-                         binascii.hexlify(type_hex) + ")")
-            logger.debug("  Error Code=" + str(error_code) + " (0x" +
-                         binascii.hexlify(error_code_hex) + ")")
-            logger.debug("  Error Subode=" + str(error_subcode) + " (0x" +
-                         binascii.hexlify(error_subcode_hex) + ")")
+            logger.debug(
+                "  Length=" + str(length) + " (0x" + binascii.hexlify(length_hex) + ")"
+            )
+            logger.debug(
+                "  Type=" + str(type) + " (0x" + binascii.hexlify(type_hex) + ")"
+            )
+            logger.debug(
+                "  Error Code="
+                + str(error_code)
+                + " (0x"
+                + binascii.hexlify(error_code_hex)
+                + ")"
+            )
+            logger.debug(
+                "  Error Subcode="
+                + str(error_subcode)
+                + " (0x"
+                + binascii.hexlify(error_subcode_hex)
+                + ")"
+            )
             logger.debug("  Data=" + " (0x" + binascii.hexlify(data_hex) + ")")
-            logger.debug("NOTIFICATION message encoded: 0x%s",
-                         binascii.b2a_hex(message_hex))
+            logger.debug(
+                "NOTIFICATION message encoded: 0x%s", binascii.b2a_hex(message_hex)
+            )
 
         return message_hex
 
@@ -1331,21 +1555,20 @@ class MessageGenerator(object):
         length_hex = struct.pack(">H", length)
 
         # KEEP ALIVE Message
-        message_hex = (
-            marker_hex +
-            length_hex +
-            type_hex
-        )
+        message_hex = marker_hex + length_hex + type_hex
 
         if self.log_debug:
             logger.debug("KEEP ALIVE message encoding")
             logger.debug("  Marker=0x" + binascii.hexlify(marker_hex))
-            logger.debug("  Length=" + str(length) + " (0x" +
-                         binascii.hexlify(length_hex) + ")")
-            logger.debug("  Type=" + str(type) + " (0x" +
-                         binascii.hexlify(type_hex) + ")")
-            logger.debug("KEEP ALIVE message encoded: 0x%s",
-                         binascii.b2a_hex(message_hex))
+            logger.debug(
+                "  Length=" + str(length) + " (0x" + binascii.hexlify(length_hex) + ")"
+            )
+            logger.debug(
+                "  Type=" + str(type) + " (0x" + binascii.hexlify(type_hex) + ")"
+            )
+            logger.debug(
+                "KEEP ALIVE message encoded: 0x%s", binascii.b2a_hex(message_hex)
+            )
 
         return message_hex
 
@@ -1440,9 +1663,21 @@ class ReadTracker(object):
     for idle waiting.
     """
 
-    def __init__(self, bgp_socket, timer, storage, evpn=False, mvpn=False,
-                 l3vpn_mcast=False, allf=False, l3vpn=False, rt_constrain=False,
-                 ipv6=False, grace=8, wait_for_read=10):
+    def __init__(
+        self,
+        bgp_socket,
+        timer,
+        storage,
+        evpn=False,
+        mvpn=False,
+        l3vpn_mcast=False,
+        allf=False,
+        l3vpn=False,
+        rt_constrain=False,
+        ipv6=False,
+        grace=8,
+        wait_for_read=10,
+    ):
         """The reader initialisation.
 
         Arguments:
@@ -1505,8 +1740,9 @@ class ReadTracker(object):
                 # The logical block was a BGP header.
                 # Now we know the size of the message.
                 self.reading_header = False
-                self.bytes_to_read = (get_short_int_from_message(self.msg_in) -
-                                      self.header_length)
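+                # The length field counts the whole message, so the header
+                # bytes already consumed are subtracted here.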
+                self.bytes_to_read = (
+                    get_short_int_from_message(self.msg_in) - self.header_length
+                )
             else:  # We have finished reading the body of the message.
                 # Peer has just proven it is still alive.
                 self.timer.reset_peer_hold_time()
@@ -1517,21 +1753,29 @@ class ReadTracker(object):
                 # Prepare state for reading another message.
                 message_type_hex = self.msg_in[self.header_length]
                 if message_type_hex == "\x01":
-                    logger.info("OPEN message received: 0x%s",
-                                binascii.b2a_hex(self.msg_in))
+                    logger.info(
+                        "OPEN message received: 0x%s", binascii.b2a_hex(self.msg_in)
+                    )
                 elif message_type_hex == "\x02":
-                    logger.debug("UPDATE message received: 0x%s",
-                                 binascii.b2a_hex(self.msg_in))
+                    logger.debug(
+                        "UPDATE message received: 0x%s", binascii.b2a_hex(self.msg_in)
+                    )
                     self.decode_update_message(self.msg_in)
                 elif message_type_hex == "\x03":
-                    logger.info("NOTIFICATION message received: 0x%s",
-                                binascii.b2a_hex(self.msg_in))
+                    logger.info(
+                        "NOTIFICATION message received: 0x%s",
+                        binascii.b2a_hex(self.msg_in),
+                    )
                 elif message_type_hex == "\x04":
-                    logger.info("KEEP ALIVE message received: 0x%s",
-                                binascii.b2a_hex(self.msg_in))
+                    logger.info(
+                        "KEEP ALIVE message received: 0x%s",
+                        binascii.b2a_hex(self.msg_in),
+                    )
                 else:
-                    logger.warning("Unexpected message received: 0x%s",
-                                   binascii.b2a_hex(self.msg_in))
+                    logger.warning(
+                        "Unexpected message received: 0x%s",
+                        binascii.b2a_hex(self.msg_in),
+                    )
                 self.msg_in = ""
                 self.reading_header = True
                 self.bytes_to_read = self.header_length
@@ -1552,9 +1796,9 @@ class ReadTracker(object):
         while len(hex_to_decode):
             attr_flags_hex = hex_to_decode[0]
             attr_flags = int(binascii.b2a_hex(attr_flags_hex), 16)
-#            attr_optional_bit = attr_flags & 128
-#            attr_transitive_bit = attr_flags & 64
-#            attr_partial_bit = attr_flags & 32
+            #            attr_optional_bit = attr_flags & 128
+            #            attr_transitive_bit = attr_flags & 64
+            #            attr_partial_bit = attr_flags & 32
             attr_extended_length_bit = attr_flags & 16
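+            # Extended Length bit (0x10) set means the attribute length field
+            # occupies two octets instead of one; both branches below handle this.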
 
             attr_type_code_hex = hex_to_decode[1]
@@ -1563,103 +1807,146 @@ class ReadTracker(object):
             if attr_extended_length_bit:
                 attr_length_hex = hex_to_decode[2:4]
                 attr_length = int(binascii.b2a_hex(attr_length_hex), 16)
-                attr_value_hex = hex_to_decode[4:4 + attr_length]
-                hex_to_decode = hex_to_decode[4 + attr_length:]
+                attr_value_hex = hex_to_decode[4 : 4 + attr_length]
+                hex_to_decode = hex_to_decode[4 + attr_length :]
             else:
                 attr_length_hex = hex_to_decode[2]
                 attr_length = int(binascii.b2a_hex(attr_length_hex), 16)
-                attr_value_hex = hex_to_decode[3:3 + attr_length]
-                hex_to_decode = hex_to_decode[3 + attr_length:]
+                attr_value_hex = hex_to_decode[3 : 3 + attr_length]
+                hex_to_decode = hex_to_decode[3 + attr_length :]
 
             if attr_type_code == 1:
-                logger.debug("Attribute type=1 (ORIGIN, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=1 (ORIGIN, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 2:
-                logger.debug("Attribute type=2 (AS_PATH, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=2 (AS_PATH, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 3:
-                logger.debug("Attribute type=3 (NEXT_HOP, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=3 (NEXT_HOP, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 4:
-                logger.debug("Attribute type=4 (MULTI_EXIT_DISC, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=4 (MULTI_EXIT_DISC, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 5:
-                logger.debug("Attribute type=5 (LOCAL_PREF, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=5 (LOCAL_PREF, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 6:
-                logger.debug("Attribute type=6 (ATOMIC_AGGREGATE, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=6 (ATOMIC_AGGREGATE, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 7:
-                logger.debug("Attribute type=7 (AGGREGATOR, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=7 (AGGREGATOR, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 9:  # rfc4456#section-8
-                logger.debug("Attribute type=9 (ORIGINATOR_ID, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=9 (ORIGINATOR_ID, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 10:  # rfc4456#section-8
-                logger.debug("Attribute type=10 (CLUSTER_LIST, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=10 (CLUSTER_LIST, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
             elif attr_type_code == 14:  # rfc4760#section-3
-                logger.debug("Attribute type=14 (MP_REACH_NLRI, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=14 (MP_REACH_NLRI, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
                 address_family_identifier_hex = attr_value_hex[0:2]
-                logger.debug("  Address Family Identifier=0x%s",
-                             binascii.b2a_hex(address_family_identifier_hex))
+                logger.debug(
+                    "  Address Family Identifier=0x%s",
+                    binascii.b2a_hex(address_family_identifier_hex),
+                )
                 subsequent_address_family_identifier_hex = attr_value_hex[2]
-                logger.debug("  Subsequent Address Family Identifier=0x%s",
-                             binascii.b2a_hex(subsequent_address_family_identifier_hex))
+                logger.debug(
+                    "  Subsequent Address Family Identifier=0x%s",
+                    binascii.b2a_hex(subsequent_address_family_identifier_hex),
+                )
                 next_hop_netaddr_len_hex = attr_value_hex[3]
-                next_hop_netaddr_len = int(binascii.b2a_hex(next_hop_netaddr_len_hex), 16)
-                logger.debug("  Length of Next Hop Network Address=%s (0x%s)",
-                             next_hop_netaddr_len,
-                             binascii.b2a_hex(next_hop_netaddr_len_hex))
-                next_hop_netaddr_hex = attr_value_hex[4:4 + next_hop_netaddr_len]
-                next_hop_netaddr = ".".join(str(i) for i in struct.unpack("BBBB", next_hop_netaddr_hex))
-                logger.debug("  Network Address of Next Hop=%s (0x%s)",
-                             next_hop_netaddr, binascii.b2a_hex(next_hop_netaddr_hex))
+                next_hop_netaddr_len = int(
+                    binascii.b2a_hex(next_hop_netaddr_len_hex), 16
+                )
+                logger.debug(
+                    "  Length of Next Hop Network Address=%s (0x%s)",
+                    next_hop_netaddr_len,
+                    binascii.b2a_hex(next_hop_netaddr_len_hex),
+                )
+                next_hop_netaddr_hex = attr_value_hex[4 : 4 + next_hop_netaddr_len]
+                next_hop_netaddr = ".".join(
+                    str(i) for i in struct.unpack("BBBB", next_hop_netaddr_hex)
+                )
+                logger.debug(
+                    "  Network Address of Next Hop=%s (0x%s)",
+                    next_hop_netaddr,
+                    binascii.b2a_hex(next_hop_netaddr_hex),
+                )
                 reserved_hex = attr_value_hex[4 + next_hop_netaddr_len]
-                logger.debug("  Reserved=0x%s",
-                             binascii.b2a_hex(reserved_hex))
-                nlri_hex = attr_value_hex[4 + next_hop_netaddr_len + 1:]
-                logger.debug("  Network Layer Reachability Information=0x%s",
-                             binascii.b2a_hex(nlri_hex))
+                logger.debug("  Reserved=0x%s", binascii.b2a_hex(reserved_hex))
+                nlri_hex = attr_value_hex[4 + next_hop_netaddr_len + 1 :]
+                logger.debug(
+                    "  Network Layer Reachability Information=0x%s",
+                    binascii.b2a_hex(nlri_hex),
+                )
                 nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
                 logger.debug("  NLRI prefix list: %s", nlri_prefix_list)
                 for prefix in nlri_prefix_list:
                     logger.debug("  nlri_prefix_received: %s", prefix)
                 self.prefixes_introduced += len(nlri_prefix_list)  # update counter
             elif attr_type_code == 15:  # rfc4760#section-4
-                logger.debug("Attribute type=15 (MP_UNREACH_NLRI, flags:0x%s)",
-                             binascii.b2a_hex(attr_flags_hex))
+                logger.debug(
+                    "Attribute type=15 (MP_UNREACH_NLRI, flags:0x%s)",
+                    binascii.b2a_hex(attr_flags_hex),
+                )
                 logger.debug("Attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
                 address_family_identifier_hex = attr_value_hex[0:2]
-                logger.debug("  Address Family Identifier=0x%s",
-                             binascii.b2a_hex(address_family_identifier_hex))
+                logger.debug(
+                    "  Address Family Identifier=0x%s",
+                    binascii.b2a_hex(address_family_identifier_hex),
+                )
                 subsequent_address_family_identifier_hex = attr_value_hex[2]
-                logger.debug("  Subsequent Address Family Identifier=0x%s",
-                             binascii.b2a_hex(subsequent_address_family_identifier_hex))
+                logger.debug(
+                    "  Subsequent Address Family Identifier=0x%s",
+                    binascii.b2a_hex(subsequent_address_family_identifier_hex),
+                )
                 wd_hex = attr_value_hex[3:]
-                logger.debug("  Withdrawn Routes=0x%s",
-                             binascii.b2a_hex(wd_hex))
+                logger.debug("  Withdrawn Routes=0x%s", binascii.b2a_hex(wd_hex))
                 wdr_prefix_list = get_prefix_list_from_hex(wd_hex)
-                logger.debug("  Withdrawn routes prefix list: %s",
-                             wdr_prefix_list)
+                logger.debug("  Withdrawn routes prefix list: %s", wdr_prefix_list)
                 for prefix in wdr_prefix_list:
                     logger.debug("  withdrawn_prefix_received: %s", prefix)
                 self.prefixes_withdrawn += len(wdr_prefix_list)  # update counter
             else:
-                logger.debug("Unknown attribute type=%s, flags:0x%s)", attr_type_code,
-                             binascii.b2a_hex(attr_flags_hex))
-                logger.debug("Unknown attribute value=0x%s", binascii.b2a_hex(attr_value_hex))
+                logger.debug(
+                    "Unknown attribute type=%s, flags:0x%s)",
+                    attr_type_code,
+                    binascii.b2a_hex(attr_flags_hex),
+                )
+                logger.debug(
+                    "Unknown attribute value=0x%s", binascii.b2a_hex(attr_value_hex)
+                )
         return None
 
     def decode_update_message(self, msg):
@@ -1673,20 +1960,20 @@ class ReadTracker(object):
         logger.debug("Decoding update message:")
         # message header - marker
         marker_hex = msg[:16]
-        logger.debug("Message header marker: 0x%s",
-                     binascii.b2a_hex(marker_hex))
+        logger.debug("Message header marker: 0x%s", binascii.b2a_hex(marker_hex))
         # message header - message length
         msg_length_hex = msg[16:18]
         msg_length = int(binascii.b2a_hex(msg_length_hex), 16)
-        logger.debug("Message lenght: 0x%s (%s)",
-                     binascii.b2a_hex(msg_length_hex), msg_length)
+        logger.debug(
+            "Message length: 0x%s (%s)", binascii.b2a_hex(msg_length_hex), msg_length
+        )
         # message header - message type
         msg_type_hex = msg[18:19]
         msg_type = int(binascii.b2a_hex(msg_type_hex), 16)
 
         with self.storage as stor:
             # this will replace the previously stored message
-            stor['update'] = binascii.hexlify(msg)
+            stor["update"] = binascii.hexlify(msg)
 
         logger.debug("Evpn {}".format(self.evpn))
         if self.evpn:
@@ -1695,7 +1982,9 @@ class ReadTracker(object):
 
         logger.debug("Graceful-restart {}".format(self.grace))
         if self.grace != 8:
-            logger.debug("Skipping update decoding due to graceful-restart data expected")
+            logger.debug(
+                "Skipping update decoding due to graceful-restart data expected"
+            )
             return
 
         logger.debug("Mvpn {}".format(self.mvpn))
@@ -1715,7 +2004,9 @@ class ReadTracker(object):
 
         logger.debug("Route-Target-Constrain {}".format(self.rt_constrain))
         if self.rt_constrain:
-            logger.debug("Skipping update decoding due to Route-Target-Constrain data expected")
+            logger.debug(
+                "Skipping update decoding due to Route-Target-Constrain data expected"
+            )
             return
 
         logger.debug("Ipv6-Unicast {}".format(self.ipv6))
@@ -1729,31 +2020,36 @@ class ReadTracker(object):
             return
 
         if msg_type == 2:
-            logger.debug("Message type: 0x%s (update)",
-                         binascii.b2a_hex(msg_type_hex))
+            logger.debug("Message type: 0x%s (update)", binascii.b2a_hex(msg_type_hex))
             # withdrawn routes length
             wdr_length_hex = msg[19:21]
             wdr_length = int(binascii.b2a_hex(wdr_length_hex), 16)
-            logger.debug("Withdrawn routes lenght: 0x%s (%s)",
-                         binascii.b2a_hex(wdr_length_hex), wdr_length)
+            logger.debug(
+                "Withdrawn routes length: 0x%s (%s)",
+                binascii.b2a_hex(wdr_length_hex),
+                wdr_length,
+            )
             # withdrawn routes
-            wdr_hex = msg[21:21 + wdr_length]
-            logger.debug("Withdrawn routes: 0x%s",
-                         binascii.b2a_hex(wdr_hex))
+            wdr_hex = msg[21 : 21 + wdr_length]
+            logger.debug("Withdrawn routes: 0x%s", binascii.b2a_hex(wdr_hex))
             wdr_prefix_list = get_prefix_list_from_hex(wdr_hex)
-            logger.debug("Withdrawn routes prefix list: %s",
-                         wdr_prefix_list)
+            logger.debug("Withdrawn routes prefix list: %s", wdr_prefix_list)
             for prefix in wdr_prefix_list:
                 logger.debug("withdrawn_prefix_received: %s", prefix)
             # total path attribute length
             total_pa_length_offset = 21 + wdr_length
-            total_pa_length_hex = msg[total_pa_length_offset:total_pa_length_offset + 2]
+            total_pa_length_hex = msg[
+                total_pa_length_offset : total_pa_length_offset + 2
+            ]
             total_pa_length = int(binascii.b2a_hex(total_pa_length_hex), 16)
-            logger.debug("Total path attribute lenght: 0x%s (%s)",
-                         binascii.b2a_hex(total_pa_length_hex), total_pa_length)
+            logger.debug(
+                "Total path attribute length: 0x%s (%s)",
+                binascii.b2a_hex(total_pa_length_hex),
+                total_pa_length,
+            )
             # path attributes
             pa_offset = total_pa_length_offset + 2
-            pa_hex = msg[pa_offset:pa_offset + total_pa_length]
+            pa_hex = msg[pa_offset : pa_offset + total_pa_length]
             logger.debug("Path attributes: 0x%s", binascii.b2a_hex(pa_hex))
             self.decode_path_attributes(pa_hex)
             # network layer reachability information length
@@ -1761,7 +2057,7 @@ class ReadTracker(object):
             logger.debug("Calculated NLRI length: %s", nlri_length)
             # network layer reachability information
             nlri_offset = pa_offset + total_pa_length
-            nlri_hex = msg[nlri_offset:nlri_offset + nlri_length]
+            nlri_hex = msg[nlri_offset : nlri_offset + nlri_length]
             logger.debug("NLRI: 0x%s", binascii.b2a_hex(nlri_hex))
             nlri_prefix_list = get_prefix_list_from_hex(nlri_hex)
             logger.debug("NLRI prefix list: %s", nlri_prefix_list)
@@ -1772,8 +2068,11 @@ class ReadTracker(object):
             self.prefixes_introduced += len(nlri_prefix_list)
             self.prefixes_withdrawn += len(wdr_prefix_list)
         else:
-            logger.error("Unexpeced message type 0x%s in 0x%s",
-                         binascii.b2a_hex(msg_type_hex), binascii.b2a_hex(msg))
+            logger.error(
+                "Unexpected message type 0x%s in 0x%s",
+                binascii.b2a_hex(msg_type_hex),
+                binascii.b2a_hex(msg),
+            )
 
     def wait_for_read(self):
         """Read message until timeout (next expected event).
@@ -1798,12 +2097,15 @@ class ReadTracker(object):
         if not self.rx_activity_detected or not (self.updates_received % 100):
             # right time to write statistics to the log (not for every update and
             # not too frequently to avoid having large log files)
-            logger.info("total_received_update_message_counter: %s",
-                        self.updates_received)
-            logger.info("total_received_nlri_prefix_counter: %s",
-                        self.prefixes_introduced)
-            logger.info("total_received_withdrawn_prefix_counter: %s",
-                        self.prefixes_withdrawn)
+            logger.info(
+                "total_received_update_message_counter: %s", self.updates_received
+            )
+            logger.info(
+                "total_received_nlri_prefix_counter: %s", self.prefixes_introduced
+            )
+            logger.info(
+                "total_received_withdrawn_prefix_counter: %s", self.prefixes_withdrawn
+            )
 
         start_time = time.time()
         select.select([self.socket], [], [self.socket], wait_timedelta)
@@ -1891,10 +2193,20 @@ class StateTracker(object):
         self.generator = generator
         self.timer = timer
         # Sub-trackers.
-        self.reader = ReadTracker(bgp_socket, timer, storage, evpn=cliargs.evpn, mvpn=cliargs.mvpn,
-                                  l3vpn_mcast=cliargs.l3vpn_mcast, l3vpn=cliargs.l3vpn, allf=cliargs.allf,
-                                  rt_constrain=cliargs.rt_constrain, ipv6=cliargs.ipv6, grace=cliargs.grace,
-                                  wait_for_read=cliargs.wfr)
+        self.reader = ReadTracker(
+            bgp_socket,
+            timer,
+            storage,
+            evpn=cliargs.evpn,
+            mvpn=cliargs.mvpn,
+            l3vpn_mcast=cliargs.l3vpn_mcast,
+            l3vpn=cliargs.l3vpn,
+            allf=cliargs.allf,
+            rt_constrain=cliargs.rt_constrain,
+            ipv6=cliargs.ipv6,
+            grace=cliargs.grace,
+            wait_for_read=cliargs.wfr,
+        )
         self.writer = WriteTracker(bgp_socket, generator, timer)
         # Prioritization state.
         self.prioritize_writing = False
@@ -1921,7 +2233,9 @@ class StateTracker(object):
             if self.timer.is_time_for_my_keepalive():
                 if not self.writer.sending_message:
                     # We need to schedule a keepalive ASAP.
-                    self.writer.enqueue_message_for_sending(self.generator.keepalive_message())
+                    self.writer.enqueue_message_for_sending(
+                        self.generator.keepalive_message()
+                    )
                     logger.info("KEEP ALIVE is sent.")
                 # We are sending a message now, so let's prioritize it.
                 self.prioritize_writing = True
@@ -1937,8 +2251,9 @@ class StateTracker(object):
         # which actions are available.
         # socket.socket() returns three lists,
         # we store them to list of lists.
-        list_list = select.select([self.socket], [self.socket], [self.socket],
-                                  self.timer.report_timedelta)
+        list_list = select.select(
+            [self.socket], [self.socket], [self.socket], self.timer.report_timedelta
+        )
         read_list, write_list, except_list = list_list
         # Lists are unpacked, each is either [] or [self.socket],
         # so we will test them as boolean.
@@ -1981,9 +2296,9 @@ class StateTracker(object):
                     logger.info("Storing performance results.")
                     self.generator.store_results()
                     logger.info("Finally an END-OF-RIB is sent.")
-                    msg_out += self.generator.update_message(wr_prefixes=[],
-                                                             nlri_prefixes=[],
-                                                             end_of_rib=True)
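+                    # An empty UPDATE with end_of_rib=True encodes the END-OF-RIB marker.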
+                    msg_out += self.generator.update_message(
+                        wr_prefixes=[], nlri_prefixes=[], end_of_rib=True
+                    )
                 self.writer.enqueue_message_for_sending(msg_out)
                 # Attempt for real sending to be done in next iteration.
                 return
@@ -1992,8 +2307,11 @@ class StateTracker(object):
             self.reader.wait_for_read()
             return
         # We can neither read nor write.
-        logger.warning("Input and output both blocked for " +
-                       str(self.timer.report_timedelta) + " seconds.")
+        logger.warning(
+            "Input and output both blocked for "
+            + str(self.timer.report_timedelta)
+            + " seconds."
+        )
         # FIXME: Are we sure select has been really waiting
         # the whole period?
         return
@@ -2009,7 +2327,9 @@ def create_logger(loglevel, logfile):
         :return: logger object
     """
     logger = logging.getLogger("logger")
-    log_formatter = logging.Formatter("%(asctime)s %(levelname)s BGP-%(threadName)s: %(message)s")
+    log_formatter = logging.Formatter(
+        "%(asctime)s %(levelname)s BGP-%(threadName)s: %(message)s"
+    )
     console_handler = logging.StreamHandler()
     file_handler = logging.FileHandler(logfile, mode="w")
     console_handler.setFormatter(log_formatter)
@@ -2039,7 +2359,7 @@ def job(arguments, inqueue, storage):
     # to work with "you first" peers.
     msg_in = read_open_message(bgp_socket)
     logger.info(binascii.hexlify(msg_in))
-    storage['open'] = binascii.hexlify(msg_in)
+    storage["open"] = binascii.hexlify(msg_in)
     timer = TimeTracker(msg_in)
     generator = MessageGenerator(arguments)
     msg_out = generator.open_message()
@@ -2069,28 +2389,28 @@ def job(arguments, inqueue, storage):
 
 
 class Rpcs:
-    '''Handler for SimpleXMLRPCServer'''
+    """Handler for SimpleXMLRPCServer"""
 
     def __init__(self, sendqueue, storage):
-        '''Init method
+        """Init method
 
         Arguments:
             :sendqueue: queue for data to be sent towards odl
             :storage: thread safe dict
-        '''
+        """
         self.queue = sendqueue
         self.storage = storage
 
     def send(self, text):
-        '''Data to be sent
+        """Data to be sent
 
         Arguments:
             :text: hex string of the data to be sent
-        '''
+        """
         self.queue.put(text)
 
-    def get(self, text=''):
-        '''Reads data form the storage
+    def get(self, text=""):
+        """Reads data from the storage
 
         - returns stored data or an empty string, at the moment only
           'update' is stored
@@ -2099,16 +2419,16 @@ class Rpcs:
             :text: a key to the storage to get the data
         Returns:
             :data: stored data
-        '''
+        """
         with self.storage as stor:
-            return stor.get(text, '')
+            return stor.get(text, "")
 
-    def clean(self, text=''):
-        '''Cleans data form the storage
+    def clean(self, text=""):
+        """Cleans data from the storage
 
         Arguments:
             :text: a key to the storage to clean the data
-        '''
+        """
         with self.storage as stor:
             if text in stor:
                 del stor[text]
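+
+# Example client usage (a sketch; the server address is an assumption, the
+# method names come from the Rpcs class above):
+#   import xmlrpclib
+#   proxy = xmlrpclib.ServerProxy("http://127.0.0.1:8000")
+#   proxy.send("0013...")     # hex string to be sent towards ODL
+#   proxy.get("update")       # last stored UPDATE message, or ""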
index 5e7b553c0c8a44f6d0c9d55b9c7ed202e5dfb10d..32888ddbfd57c41172d8714771b85c1fb260c827 100755 (executable)
@@ -14,42 +14,107 @@ __license__ = "New-style BSD"
 __email__ = "jmedved@cisco.com"
 
 
-parser = argparse.ArgumentParser(description='Datastore Benchmarking'
-                                             ''
-                                             'See documentation @:'
-                                             'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns'  # noqa
-                                             '')
+parser = argparse.ArgumentParser(
+    description="Datastore Benchmarking. "
+    "See documentation @: "
+    "https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns"  # noqa
+)
 
 # Host Config
-parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
-parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
+parser.add_argument(
+    "--host",
+    default="localhost",
+    help="the IP of the target host to initiate benchmark testing on.",
+)
+parser.add_argument(
+    "--port", type=int, default=8181, help="the port number of target host."
+)
 
 # Test Parameters
-parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
-                    help="list of the transaction types to execute.")
-parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
-parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
-                    help="number of inner elements to process.")
-parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
-                    help="number of operations per transaction.")
-parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
-                    default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
-parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
-                    default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
-parser.add_argument("--datastore", choices=["CONFIG", "OPERATIONAL", "BOTH"], nargs='+',
-                    default=["OPERATIONAL", "CONFIG"], help="data-store type (config/operational) to use")
+parser.add_argument(
+    "--txtype",
+    choices=["TX-CHAINING", "SIMPLE-TX"],
+    nargs="+",
+    default=["TX-CHAINING", "SIMPLE-TX"],
+    help="list of the transaction types to execute.",
+)
+parser.add_argument(
+    "--total", type=int, default=100000, help="total number of elements to process."
+)
+parser.add_argument(
+    "--inner",
+    type=int,
+    default=[1, 10, 100, 1000, 10000, 100000],
+    nargs="+",
+    help="number of inner elements to process.",
+)
+parser.add_argument(
+    "--ops",
+    type=int,
+    default=[1, 10, 100, 1000, 10000, 100000],
+    nargs="+",
+    help="number of operations per transaction.",
+)
+parser.add_argument(
+    "--optype",
+    choices=["PUT", "MERGE", "DELETE", "READ"],
+    nargs="+",
+    default=["PUT", "MERGE", "DELETE", "READ"],
+    help="list of the types operations to execute.",
+)
+parser.add_argument(
+    "--format",
+    choices=["BINDING-AWARE", "BINDING-INDEPENDENT"],
+    nargs="+",
+    default=["BINDING-AWARE", "BINDING-INDEPENDENT"],
+    help="list of data formats to execute.",
+)
+parser.add_argument(
+    "--datastore",
+    choices=["CONFIG", "OPERATIONAL", "BOTH"],
+    nargs="+",
+    default=["OPERATIONAL", "CONFIG"],
+    help="data-store type (config/operational) to use",
+)
 # There is also "listeners" parameter specified in the Yang file now.
-parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
-parser.add_argument("--runs", type=int, default=10,
-                    help="number of official test runs. Note: Reported results are based on these runs.")
-parser.add_argument("--plot", type=str, default='none',
-                    help="keywords filter for results to be drawn in plot (special keywords: all, none).")
-parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
-                    help="units of test duration values provided by dsbenchmark controller feature")
-parser.add_argument("--outfile-struct", dest="outfilestruct", default="perf_per_struct.csv",
-                    help="units of test duration values provided by dsbenchmark controller feature")
-parser.add_argument("--outfile-ops", dest="outfileops", default="perf_per_ops.csv",
-                    help="units of test duration values provided by dsbenchmark controller feature")
+parser.add_argument(
+    "--warmup",
+    type=int,
+    default=10,
+    help="number of warmup runs before official test runs",
+)
+parser.add_argument(
+    "--runs",
+    type=int,
+    default=10,
+    help="number of official test runs. Note: Reported results are based on these runs.",
+)
+parser.add_argument(
+    "--plot",
+    type=str,
+    default="none",
+    help="keywords filter for results to be drawn in plot (special keywords: all, none).",
+)
+parser.add_argument(
+    "--units",
+    choices=["miliseconds", "microseconds"],
+    default="microseconds",
+    help="units of test duration values provided by dsbenchmark controller feature",
+)
+parser.add_argument(
+    "--outfile-struct",
+    dest="outfilestruct",
+    default="perf_per_struct.csv",
+    help="output CSV file for per-structure performance results",
+)
+parser.add_argument(
+    "--outfile-ops",
+    dest="outfileops",
+    default="perf_per_ops.csv",
+    help="output CSV file for per-operation performance results",
+)
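+# Example invocation (a sketch; the script name and argument values are
+# illustrative assumptions, not taken from this patch):
+#   python dsbenchmark.py --host 127.0.0.1 --port 8181 --warmup 5 --runs 10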
 args = parser.parse_args()
 
 
@@ -64,11 +129,13 @@ def send_clear_request():
     """
     url = BASE_URL + "operations/dsbenchmark:cleanup-store"
 
-    r = requests.post(url, stream=False, auth=('admin', 'admin'))
+    r = requests.post(url, stream=False, auth=("admin", "admin"))
     print(r.status_code)
 
 
-def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
+def send_test_request(
+    tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+):
     """
     Sends a request to the dsbenchmark app to start a data store benchmark test run.
     The dsbenchmark app will perform the requested benchmark test and return measured
@@ -82,9 +149,9 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner
     :return:
     """
     url = BASE_URL + "operations/dsbenchmark:start-test"
-    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
+    postheaders = {"content-type": "application/json", "Accept": "application/json"}
 
-    test_request_template = '''{
+    test_request_template = """{
         "input": {
             "transaction-type": "%s",
             "operation": "%s",
@@ -94,14 +161,24 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner
             "innerElements": %d,
             "putsPerTx": %d
         }
-    }'''
-    data = test_request_template % (tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
-    result = {u'http-status': r.status_code}
+    }"""
+    data = test_request_template % (
+        tx_type,
+        operation,
+        data_fmt,
+        datastore,
+        outer_elem,
+        inner_elem,
+        ops_per_tx,
+    )
+    r = requests.post(
+        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
+    )
+    result = {u"http-status": r.status_code}
     if r.status_code == 200:
-        result = dict(result.items() + json.loads(r.content)['output'].items())
+        result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
-        print('Error %s, %s' % (r.status_code, r.content))
+        print("Error %s, %s" % (r.status_code, r.content))
     return result
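
A portability note on the merge above: dict(result.items() + ...) works only on
Python 2, where items() returns a list; on Python 3 it returns a view and "+"
raises TypeError. A portable sketch under the same response shape:

    result = {"http-status": r.status_code}
    if r.status_code == 200:
        # dict.update() behaves the same on Python 2 and 3
        result.update(json.loads(r.content)["output"])

Building the request body with json.dumps() on a dict, rather than %-formatting
a string template, would likewise sidestep manual escaping, though that goes
beyond what this formatting-only patch changes.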
 
 
@@ -115,11 +192,31 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
-          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
-
-
-def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
+    print(
+        "%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d"
+        % (
+            run_type,
+            idx,
+            res[u"status"],
+            res[u"listBuildTime"],
+            res[u"execTime"],
+            res[u"txOk"],
+            res[u"txError"],
+        )
+    )
+
+
+def run_test(
+    warmup_runs,
+    test_runs,
+    tx_type,
+    operation,
+    data_fmt,
+    datastore,
+    outer_elem,
+    inner_elem,
+    ops_per_tx,
+):
     """
     Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
     the specified number of dsbenchmark test runs and computes the average time
@@ -138,23 +235,53 @@ def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, ou
     total_build_time = 0.0
     total_exec_time = 0.0
 
-    print("Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,)
-    print("Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx)
+    print(
+        "Tx Type:",
+        tx_type,
+        "Operation:",
+        operation,
+        "Data Format:",
+        data_fmt,
+        "Datastore:",
+        datastore,
+    )
+    print(
+        "Outer Elements:",
+        outer_elem,
+        "Inner Elements:",
+        inner_elem,
+        "PutsPerTx:",
+        ops_per_tx,
+    )
     for idx in range(warmup_runs):
-        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-        print_results('WARMUP', idx, res)
+        res = send_test_request(
+            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+        )
+        print_results("WARMUP", idx, res)
 
     for idx in range(test_runs):
-        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-        print_results('TEST', idx, res)
-        total_build_time += res['listBuildTime']
-        total_exec_time += res['execTime']
+        res = send_test_request(
+            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+        )
+        print_results("TEST", idx, res)
+        total_build_time += res["listBuildTime"]
+        total_exec_time += res["execTime"]
 
     return total_build_time / test_runs, total_exec_time / test_runs
 
 
-def store_result(values, tx_type, operation, data_fmt, datastore,
-                 outer_elem, inner_elem, ops_per_tx, value_name, value):
+def store_result(
+    values,
+    tx_type,
+    operation,
+    data_fmt,
+    datastore,
+    outer_elem,
+    inner_elem,
+    ops_per_tx,
+    value_name,
+    value,
+):
     """
     Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
     :param values: The list (dictionary) to be used for storing the result
@@ -168,8 +295,23 @@ def store_result(values, tx_type, operation, data_fmt, datastore,
     :param value: The (measured) value
     :return: none
     """
-    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/'
-                + str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
+    plot_key = (
+        datastore
+        + "-"
+        + data_fmt
+        + "-"
+        + tx_type
+        + "-"
+        + operation
+        + "-"
+        + str(outer_elem)
+        + "/"
+        + str(inner_elem)
+        + "OUTER/INNER-"
+        + str(ops_per_tx)
+        + "OP-"
+        + value_name
+    )
     values[plot_key] = value
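
For reference, the concatenation above produces keys of the shape
CONFIG-BINARY-simple-tx-PUT-100/10OUTER/INNER-1OP-EXEC (the component values in
this sample are hypothetical). An equivalent, easier-to-scan construction:

    # same key, built with str.format() instead of chained "+"
    plot_key = "{}-{}-{}-{}-{}/{}OUTER/INNER-{}OP-{}".format(
        datastore, data_fmt, tx_type, operation,
        outer_elem, inner_elem, ops_per_tx, value_name,
    )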
 
 
@@ -182,18 +324,20 @@ def write_results_to_file(values, file_name, key_filter):
     :param key_filter: A regexp string to filter the results to be finally put into the file
     :return: none
     """
-    first_line = ''
-    second_line = ''
-    f = open(file_name, 'wt')
+    first_line = ""
+    second_line = ""
+    f = open(file_name, "wt")
     try:
         for key in sorted(values):
-            if (key_filter != 'none') & ((key_filter == 'all') | (re.search(key_filter, key) is not None)):
-                first_line += key + ','
-                second_line += str(values[key]) + ','
+            if (key_filter != "none") & (
+                (key_filter == "all") | (re.search(key_filter, key) is not None)
+            ):
+                first_line += key + ","
+                second_line += str(values[key]) + ","
         first_line = first_line[:-1]
         second_line = second_line[:-1]
-        f.write(first_line + '\n')
-        f.write(second_line + '\n')
+        f.write(first_line + "\n")
+        f.write(second_line + "\n")
     finally:
         f.close()
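
The filter above uses the bitwise operators "&" and "|" on booleans, which is
correct here only because both operands are already bools; a sketch of the more
conventional shape with short-circuiting and a with-block (same behavior, same
inputs assumed):

    import re

    def write_results_to_file(values, file_name, key_filter):
        keys = [
            key
            for key in sorted(values)
            if key_filter != "none"
            and (key_filter == "all" or re.search(key_filter, key) is not None)
        ]
        with open(file_name, "wt") as f:  # replaces the explicit try/finally close
            f.write(",".join(keys) + "\n")
            f.write(",".join(str(values[key]) for key in keys) + "\n")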
 
@@ -208,7 +352,7 @@ if __name__ == "__main__":
     DATA_FORMATS = args.format
     DATASTORES = args.datastore
     PLOT_FILTER = args.plot
-    if args.units == 'miliseconds':
+    if args.units == "miliseconds":
         TIME_DIV = 1
     else:
         TIME_DIV = 1000
@@ -225,7 +369,7 @@ if __name__ == "__main__":
     send_clear_request()
 
     # Run the benchmark tests and collect data in a csv file for import into a graphing software
-    f = open('test.csv', 'wt')
+    f = open("test.csv", "wt")
     try:
         start_time = time.time()
         print("Start time: %f " % (start_time))
@@ -235,77 +379,153 @@ if __name__ == "__main__":
         # Determine the impact of transaction type, data format and data structure on performance.
         # Iterate over all transaction types, data formats, operation types, and different
         # list-of-lists layouts; always use a single operation in each transaction
-        print('\n#######################################')
-        print('Tx type, data format & data structure')
-        print('#######################################')
+        print("\n#######################################")
+        print("Tx type, data format & data structure")
+        print("#######################################")
         for tx_type in TX_TYPES:
-            print('***************************************')
-            print('Transaction Type: %s' % tx_type)
-            print('***************************************')
-            writer.writerow((('%s:' % tx_type), '', ''))
+            print("***************************************")
+            print("Transaction Type: %s" % tx_type)
+            print("***************************************")
+            writer.writerow((("%s:" % tx_type), "", ""))
 
             for fmt in DATA_FORMATS:
-                print('---------------------------------------')
-                print('Data format: %s' % fmt)
-                print('---------------------------------------')
-                writer.writerow(('', ('%s:' % fmt), ''))
+                print("---------------------------------------")
+                print("Data format: %s" % fmt)
+                print("---------------------------------------")
+                writer.writerow(("", ("%s:" % fmt), ""))
 
                 for datastore in DATASTORES:
                     print
-                    print('Data store: %s' % datastore)
+                    print("Data store: %s" % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print('Operation: %s' % oper)
-                        writer.writerow(('', '', '%s:' % oper))
+                        print("Operation: %s" % oper)
+                        writer.writerow(("", "", "%s:" % oper))
 
                         for elem in INNER_ELEMENTS:
-                            avg_build_time, avg_exec_time = run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt,
-                                                                     datastore, TOTAL_ELEMENTS / elem, elem, 1)
-                            e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
-                            writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
-                                             (avg_build_time + avg_exec_time)))
-                            store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
-                                         'BUILD', avg_build_time / TIME_DIV)
-                            store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
-                                         'EXEC', avg_exec_time / TIME_DIV)
+                            avg_build_time, avg_exec_time = run_test(
+                                WARMUP_RUNS,
+                                TEST_RUNS,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                            )
+                            e_label = "%d/%d" % (TOTAL_ELEMENTS / elem, elem)
+                            writer.writerow(
+                                (
+                                    "",
+                                    "",
+                                    "",
+                                    e_label,
+                                    avg_build_time,
+                                    avg_exec_time,
+                                    (avg_build_time + avg_exec_time),
+                                )
+                            )
+                            store_result(
+                                PLOT1,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                                "BUILD",
+                                avg_build_time / TIME_DIV,
+                            )
+                            store_result(
+                                PLOT1,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                                "EXEC",
+                                avg_exec_time / TIME_DIV,
+                            )
 
         # Determine the impact of number of writes per transaction on performance.
         # Iterate over all transaction types, data formats, operation types, and
         # operations-per-transaction; always use a list of lists where the inner list has one parameter
-        print('\n#######################################')
-        print('Puts per tx')
-        print('#######################################')
+        print("\n#######################################")
+        print("Puts per tx")
+        print("#######################################")
         for tx_type in TX_TYPES:
-            print('***************************************')
-            print('Transaction Type: %s' % tx_type)
-            print('***************************************')
-            writer.writerow((('%s:' % tx_type), '', ''))
+            print("***************************************")
+            print("Transaction Type: %s" % tx_type)
+            print("***************************************")
+            writer.writerow((("%s:" % tx_type), "", ""))
 
             for fmt in DATA_FORMATS:
-                print('---------------------------------------')
-                print('Data format: %s' % fmt)
-                print('---------------------------------------')
-                writer.writerow(('', ('%s:' % fmt), ''))
+                print("---------------------------------------")
+                print("Data format: %s" % fmt)
+                print("---------------------------------------")
+                writer.writerow(("", ("%s:" % fmt), ""))
 
                 for datastore in DATASTORES:
                     print
-                    print('Data store: %s' % datastore)
+                    print("Data store: %s" % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print('Operation: %s' % oper)
-                        writer.writerow(('', '', '%s:' % oper))
+                        print("Operation: %s" % oper)
+                        writer.writerow(("", "", "%s:" % oper))
 
                         for wtx in OPS_PER_TX:
-                            avg_build_time, avg_exec_time = \
-                                run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx)
-                            writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
-                                             (avg_build_time + avg_exec_time)))
-                            store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
-                                         'BUILD', avg_build_time / TIME_DIV)
-                            store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
-                                         'EXEC', avg_exec_time / TIME_DIV)
+                            avg_build_time, avg_exec_time = run_test(
+                                WARMUP_RUNS,
+                                TEST_RUNS,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS,
+                                1,
+                                wtx,
+                            )
+                            writer.writerow(
+                                (
+                                    "",
+                                    "",
+                                    "",
+                                    wtx,
+                                    avg_build_time,
+                                    avg_exec_time,
+                                    (avg_build_time + avg_exec_time),
+                                )
+                            )
+                            store_result(
+                                PLOT2,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                1,
+                                wtx,
+                                "BUILD",
+                                avg_build_time / TIME_DIV,
+                            )
+                            store_result(
+                                PLOT2,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                1,
+                                wtx,
+                                "EXEC",
+                                avg_exec_time / TIME_DIV,
+                            )
 
         write_results_to_file(PLOT1, args.outfilestruct, PLOT_FILTER)
         write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)
index 2537c649c1e5d682e9777294659100b3da91b05f..c386aa6fa70a2fd5e9043a32598af8c7a6edf67d 100755 (executable)
@@ -36,9 +36,9 @@ def send_test_request(producer_type, producers, listeners, payload_size, iterati
     :return: Result from the test request REST call (json)
     """
     url = BASE_URL + "operations/ntfbenchmark:start-test"
-    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
+    postheaders = {"content-type": "application/json", "Accept": "application/json"}
 
-    test_request_template = '''{
+    test_request_template = """{
         "input": {
             "producer-type": "%s",
             "producers": "%s",
@@ -46,14 +46,22 @@ def send_test_request(producer_type, producers, listeners, payload_size, iterati
             "payload-size": "%s",
             "iterations": "%s"
         }
-    }'''
-    data = test_request_template % (producer_type, producers, listeners, payload_size, iterations)
-    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
-    result = {u'http-status': r.status_code}
+    }"""
+    data = test_request_template % (
+        producer_type,
+        producers,
+        listeners,
+        payload_size,
+        iterations,
+    )
+    r = requests.post(
+        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
+    )
+    result = {u"http-status": r.status_code}
     if r.status_code == 200:
-        result = dict(result.items() + json.loads(r.content)['output'].items())
+        result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
-        print('Error %s, %s' % (r.status_code, r.content))
+        print("Error %s, %s" % (r.status_code, r.content))
     return result
 
 
@@ -67,13 +75,31 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print('%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' %
-          (run_type, idx,
-           res[u'producer-ok'], res[u'producer-error'], res[u'listener-ok'], res[u'producer-rate'],
-           res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time']))
-
-
-def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations):
+    print(
+        "%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d"
+        % (
+            run_type,
+            idx,
+            res[u"producer-ok"],
+            res[u"producer-error"],
+            res[u"listener-ok"],
+            res[u"producer-rate"],
+            res[u"listener-rate"],
+            res[u"producer-elapsed-time"],
+            res[u"listener-elapsed-time"],
+        )
+    )
+
+
+def run_test(
+    warmup_runs,
+    test_runs,
+    producer_type,
+    producers,
+    listeners,
+    payload_size,
+    iterations,
+):
     """
     Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
     the specified number of ntfbenchmark test runs and computes the average time
@@ -93,59 +119,112 @@ def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payloa
     total_lrate = 0.0
 
     for idx in range(warmup_runs):
-        res = send_test_request(producer_type, producers, listeners, payload_size, iterations)
-        print_results('WARM-UP', idx, res)
+        res = send_test_request(
+            producer_type, producers, listeners, payload_size, iterations
+        )
+        print_results("WARM-UP", idx, res)
 
     for idx in range(test_runs):
-        res = send_test_request(producer_type, producers, listeners, payload_size, iterations)
-        print_results('TEST', idx, res)
-        total_exec_time += res['listener-elapsed-time']
-        total_prate += res['producer-rate']
-        total_lrate += res['listener-rate']
+        res = send_test_request(
+            producer_type, producers, listeners, payload_size, iterations
+        )
+        print_results("TEST", idx, res)
+        total_exec_time += res["listener-elapsed-time"]
+        total_prate += res["producer-rate"]
+        total_lrate += res["listener-rate"]
 
     return total_exec_time / test_runs, total_prate / test_runs, total_lrate / test_runs
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='RPC Benchmarking')
+    parser = argparse.ArgumentParser(description="RPC Benchmarking")
 
     # Host Config
-    parser.add_argument("--host", default="localhost", help="IP of the target host where benchmarks will be run.")
-    parser.add_argument("--port", type=int, default=8181, help="The port number of target host.")
+    parser.add_argument(
+        "--host",
+        default="localhost",
+        help="IP of the target host where benchmarks will be run.",
+    )
+    parser.add_argument(
+        "--port", type=int, default=8181, help="The port number of target host."
+    )
 
     # Test Parameters
-    parser.add_argument("--ptype", choices=["DROPPING", "BLOCKING"], nargs='+', default='BLOCKING',
-                        help='Producer type. (default: BLOCKING)')
-    parser.add_argument("--warm", type=int, default=10, help='The number of warm-up runs before the measured test runs'
-                                                             '(Default 10)')
-    parser.add_argument("--run", type=int, default=10,
-                        help='The number of measured test runs. Reported results are based on these average of all'
-                             " measured runs. (Default 10)")
-    parser.add_argument("--producers", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32],
-                        help='The number of test producers to start. (Default 10)')
-    parser.add_argument("--listeners", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32],
-                        help='The number of test listeners to start. (Default 10)')
-    parser.add_argument("--iterations", type=int, default=100, help='The number requests that each producer issues '
-                                                                    'during the test run. (Default 10)')
-    parser.add_argument("--payload", type=int, default=10, help='Payload size for the RPC - number of elements in a '
-                                                                'simple integer list. (Default 10)')
+    parser.add_argument(
+        "--ptype",
+        choices=["DROPPING", "BLOCKING"],
+        nargs="+",
+        default="BLOCKING",
+        help="Producer type. (default: BLOCKING)",
+    )
+    parser.add_argument(
+        "--warm",
+        type=int,
+        default=10,
+        help="The number of warm-up runs before the measured test runs" "(Default 10)",
+    )
+    parser.add_argument(
+        "--run",
+        type=int,
+        default=10,
+        help="The number of measured test runs. Reported results are based on these average of all"
+        " measured runs. (Default 10)",
+    )
+    parser.add_argument(
+        "--producers",
+        type=int,
+        nargs="+",
+        default=[1, 2, 4, 8, 16, 32],
+        help="The number of test producers to start. (Default 10)",
+    )
+    parser.add_argument(
+        "--listeners",
+        type=int,
+        nargs="+",
+        default=[1, 2, 4, 8, 16, 32],
+        help="The number of test listeners to start. (Default 10)",
+    )
+    parser.add_argument(
+        "--iterations",
+        type=int,
+        default=100,
+        help="The number requests that each producer issues "
+        "during the test run. (Default 10)",
+    )
+    parser.add_argument(
+        "--payload",
+        type=int,
+        default=10,
+        help="Payload size for the RPC - number of elements in a "
+        "simple integer list. (Default 10)",
+    )
 
     args = parser.parse_args()
     BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
 
     # Run the benchmark tests and collect data in a csv file for import into a graphing software
-    f = open('test.csv', 'wt')
+    f = open("test.csv", "wt")
     try:
         writer = csv.writer(f)
         lrate_matrix = []
         prate_matrix = []
         for prod in args.producers:
-            lrate_row = ['']
-            prate_row = ['']
+            lrate_row = [""]
+            prate_row = [""]
             for lis in args.listeners:
-                exec_time, prate, lrate = run_test(args.warm, args.run, args.ptype, prod, lis,
-                                                   args.payload, args.iterations)
-                print('Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate))
+                exec_time, prate, lrate = run_test(
+                    args.warm,
+                    args.run,
+                    args.ptype,
+                    prod,
+                    lis,
+                    args.payload,
+                    args.iterations,
+                )
+                print(
+                    "Producers: %d, Listeners: %d, prate: %d, lrate: %d"
+                    % (prod, lis, prate, lrate)
+                )
                 lrate_row.append(lrate)
                 prate_row.append(prate)
 
@@ -158,9 +237,9 @@ if __name__ == "__main__":
         # writer.writerow((('%s:' % args.ptype), '', '', ''))
         # writer.writerow(('', exec_time, prate, lrate))
 
-        writer.writerow(('Listener Rates:', ''))
+        writer.writerow(("Listener Rates:", ""))
         writer.writerows(lrate_matrix)
-        writer.writerow(('Producer Rates:', ''))
+        writer.writerow(("Producer Rates:", ""))
         writer.writerows(prate_matrix)
 
     finally:
index 9c32ae48d9531f84dc5d0019e83f8e8d25dd4ce8..563ac95caf5de5ffcbc243741681d9ba3094df65 100755 (executable)
@@ -36,9 +36,9 @@ def send_test_request(operation, clients, servers, payload_size, iterations):
     :return: Result from the test request REST call (json)
     """
     url = BASE_URL + "operations/rpcbenchmark:start-test"
-    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
+    postheaders = {"content-type": "application/json", "Accept": "application/json"}
 
-    test_request_template = '''{
+    test_request_template = """{
         "input": {
             "operation": "%s",
             "num-clients": "%s",
@@ -46,14 +46,22 @@ def send_test_request(operation, clients, servers, payload_size, iterations):
             "payload-size": "%s",
             "iterations": "%s"
         }
-    }'''
-    data = test_request_template % (operation, clients, servers, payload_size, iterations)
-    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
-    result = {u'http-status': r.status_code}
+    }"""
+    data = test_request_template % (
+        operation,
+        clients,
+        servers,
+        payload_size,
+        iterations,
+    )
+    r = requests.post(
+        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
+    )
+    result = {u"http-status": r.status_code}
     if r.status_code == 200:
-        result = dict(result.items() + json.loads(r.content)['output'].items())
+        result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
-        print('Error %s, %s' % (r.status_code, r.content))
+        print("Error %s, %s" % (r.status_code, r.content))
     return result
 
 
@@ -67,12 +75,22 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print('%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' %
-          (run_type, idx,
-           res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time']))
-
-
-def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size, iterations):
+    print(
+        "%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d"
+        % (
+            run_type,
+            idx,
+            res[u"global-rtc-client-ok"],
+            res[u"global-rtc-client-error"],
+            res[u"rate"],
+            res[u"exec-time"],
+        )
+    )
+
+
+def run_test(
+    warmup_runs, test_runs, operation, clients, servers, payload_size, iterations
+):
     """
     Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
     the specified number of rpcbenchmark test runs and computes the average time
@@ -92,67 +110,112 @@ def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size,
 
     for idx in range(warmup_runs):
         res = send_test_request(operation, clients, servers, payload_size, iterations)
-        print_results('WARM-UP', idx, res)
+        print_results("WARM-UP", idx, res)
 
     for idx in range(test_runs):
         res = send_test_request(operation, clients, servers, payload_size, iterations)
-        print_results('TEST', idx, res)
-        total_exec_time += res['exec-time']
-        total_rate += res['rate']
+        print_results("TEST", idx, res)
+        total_exec_time += res["exec-time"]
+        total_rate += res["rate"]
 
     return total_exec_time / test_runs, total_rate / test_runs
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='RPC Benchmarking')
+    parser = argparse.ArgumentParser(description="RPC Benchmarking")
 
     # Host Config
-    parser.add_argument("--host", default="localhost", help="IP of the target host where benchmarks will be run.")
-    parser.add_argument("--port", type=int, default=8181, help="The port number of target host.")
+    parser.add_argument(
+        "--host",
+        default="localhost",
+        help="IP of the target host where benchmarks will be run.",
+    )
+    parser.add_argument(
+        "--port", type=int, default=8181, help="The port number of target host."
+    )
 
     # Test Parameters
-    parser.add_argument("--operation", choices=["GLOBAL-RTC", "ROUTED-RTC"], default='GLOBAL-RTC',
-                        help='RPC and client type. RPC can be global or routcan be run-to-completion (RTC).'
-                             '(default: GLOBAL-RTC - Global RPC, Run-to-completion client)')
-    parser.add_argument("--warm", type=int, default=10, help='The number of warm-up runs before the measured test runs'
-                                                             '(Default 10)')
-    parser.add_argument("--run", type=int, default=10,
-                        help='The number of measured test runs. Reported results are based on these average of all'
-                             " measured runs. (Default 10)")
-    parser.add_argument("--clients", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32, 64],
-                        help='The number of test RPC clients to start. (Default 10)')
-    parser.add_argument("--servers", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32, 64],
-                        help='The number of routed RPC servers to start in the routed RPC test. Ignored in the global '
-                             'RPC test. (Default 10)')
-    parser.add_argument("--iterations", type=int, default=10, help='The number requests that each RPC client issues '
-                                                                   'during the test run. (Default 10)')
-    parser.add_argument("--payload", type=int, default=10, help='Payload size for the RPC - number of elements in a '
-                                                                'simple integer list. (Default 10)')
+    parser.add_argument(
+        "--operation",
+        choices=["GLOBAL-RTC", "ROUTED-RTC"],
+        default="GLOBAL-RTC",
+        help="RPC and client type. RPC can be global or routcan be run-to-completion (RTC)."
+        "(default: GLOBAL-RTC - Global RPC, Run-to-completion client)",
+    )
+    parser.add_argument(
+        "--warm",
+        type=int,
+        default=10,
+        help="The number of warm-up runs before the measured test runs" "(Default 10)",
+    )
+    parser.add_argument(
+        "--run",
+        type=int,
+        default=10,
+        help="The number of measured test runs. Reported results are based on these average of all"
+        " measured runs. (Default 10)",
+    )
+    parser.add_argument(
+        "--clients",
+        type=int,
+        nargs="+",
+        default=[1, 2, 4, 8, 16, 32, 64],
+        help="The number of test RPC clients to start. (Default 10)",
+    )
+    parser.add_argument(
+        "--servers",
+        type=int,
+        nargs="+",
+        default=[1, 2, 4, 8, 16, 32, 64],
+        help="The number of routed RPC servers to start in the routed RPC test. Ignored in the global "
+        "RPC test. (Default 10)",
+    )
+    parser.add_argument(
+        "--iterations",
+        type=int,
+        default=10,
+        help="The number requests that each RPC client issues "
+        "during the test run. (Default 10)",
+    )
+    parser.add_argument(
+        "--payload",
+        type=int,
+        default=10,
+        help="Payload size for the RPC - number of elements in a "
+        "simple integer list. (Default 10)",
+    )
 
     args = parser.parse_args()
     BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
 
-    if args.operation == 'GLOBAL-RTC':
+    if args.operation == "GLOBAL-RTC":
         servers = [1]
     else:
         servers = args.servers
 
     # Run the benchmark tests and collect data in a csv file for import into a graphing software
-    f = open('test.csv', 'wt')
+    f = open("test.csv", "wt")
     try:
         writer = csv.writer(f)
         rate_matrix = []
 
         for svr in servers:
-            rate_row = ['']
+            rate_row = [""]
             for client in args.clients:
-                exec_time, rate = \
-                    run_test(args.warm, args.run, args.operation, client, svr, args.payload, args.iterations)
+                exec_time, rate = run_test(
+                    args.warm,
+                    args.run,
+                    args.operation,
+                    client,
+                    svr,
+                    args.payload,
+                    args.iterations,
+                )
                 rate_row.append(rate)
             rate_matrix.append(rate_row)
         print(rate_matrix)
 
-        writer.writerow(('RPC Rates:', ''))
+        writer.writerow(("RPC Rates:", ""))
         writer.writerows(rate_matrix)
     finally:
         f.close()
index d3c10bc6e528a644a2883ea9997aa74c140a6c0a..98892c15b8c79f91d522ee1447ad8922dd697ac7 100644 (file)
@@ -53,39 +53,80 @@ def str2bool(text):
 def parse_arguments():
     """Return parsed form of command-line arguments."""
     parser = argparse.ArgumentParser()
-    parser.add_argument("--odladdress", default="127.0.0.1",
-                        help="IP address of ODL Restconf to be used")
-    parser.add_argument("--restconfport", default="8181",
-                        help="Port on which ODL Restconf to be used")
-    parser.add_argument("--restconfuser", default="admin",
-                        help="Username for ODL Restconf authentication")
-    parser.add_argument("--restconfpassword", default="admin",
-                        help="Password for ODL Restconf authentication")
-    parser.add_argument("--scope", default="sdn",
-                        help="Scope for ODL Restconf authentication")
-    parser.add_argument("--deviceaddress", default="127.0.0.1",
-                        help="Common IP address for all available devices")
-    parser.add_argument("--devices", default="1", type=int,
-                        help="Number of devices available for connecting")
-    parser.add_argument("--deviceuser", default="admin",
-                        help="Username for netconf device authentication")
-    parser.add_argument("--devicepassword", default="admin",
-                        help="Password for netconf device authentication")
-    parser.add_argument("--startport", default="17830", type=int,
-                        help="Port number of first device")
+    parser.add_argument(
+        "--odladdress",
+        default="127.0.0.1",
+        help="IP address of ODL Restconf to be used",
+    )
+    parser.add_argument(
+        "--restconfport", default="8181", help="Port on which ODL Restconf to be used"
+    )
+    parser.add_argument(
+        "--restconfuser",
+        default="admin",
+        help="Username for ODL Restconf authentication",
+    )
+    parser.add_argument(
+        "--restconfpassword",
+        default="admin",
+        help="Password for ODL Restconf authentication",
+    )
+    parser.add_argument(
+        "--scope", default="sdn", help="Scope for ODL Restconf authentication"
+    )
+    parser.add_argument(
+        "--deviceaddress",
+        default="127.0.0.1",
+        help="Common IP address for all available devices",
+    )
+    parser.add_argument(
+        "--devices",
+        default="1",
+        type=int,
+        help="Number of devices available for connecting",
+    )
+    parser.add_argument(
+        "--deviceuser",
+        default="admin",
+        help="Username for netconf device authentication",
+    )
+    parser.add_argument(
+        "--devicepassword",
+        default="admin",
+        help="Password for netconf device authentication",
+    )
+    parser.add_argument(
+        "--startport", default="17830", type=int, help="Port number of first device"
+    )
     # FIXME: There has to be a better name, "delay" evokes seconds, not number of connections.
-    parser.add_argument("--disconndelay", default="0", type=int,
-                        help="Deconfigure oldest device if more than this devices were configured")
-    parser.add_argument("--connsleep", default="0.0", type=float,
-                        help="Sleep this many seconds after configuration to allow operational update.")
-    parser.add_argument("--basename", default="sim-device",
-                        help="Name of device without the generated suffixes")
-    parser.add_argument("--reuse", default="True", type=str2bool,
-                        help="Should single requests session be re-used")
+    parser.add_argument(
+        "--disconndelay",
+        default="0",
+        type=int,
+        help="Deconfigure oldest device if more than this devices were configured",
+    )
+    parser.add_argument(
+        "--connsleep",
+        default="0.0",
+        type=float,
+        help="Sleep this many seconds after configuration to allow operational update.",
+    )
+    parser.add_argument(
+        "--basename",
+        default="sim-device",
+        help="Name of device without the generated suffixes",
+    )
+    parser.add_argument(
+        "--reuse",
+        default="True",
+        type=str2bool,
+        help="Should single requests session be re-used",
+    )
     return parser.parse_args()  # arguments are read
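
The str2bool helper passed to --reuse is defined earlier in this file, outside
the hunks shown; argparse needs such a helper because type=bool would turn any
non-empty string, including "False", into True. A typical implementation,
offered only as an assumed sketch of what it might look like:

    def str2bool(text):
        """Interpret common textual spellings of booleans."""
        return text.strip().lower() in ("true", "t", "yes", "y", "1")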
 
 
-DATA_TEMPLATE = string.Template('''{
+DATA_TEMPLATE = string.Template(
+    """{
     "network-topology:node": {
         "node-id": "$DEVICE_NAME",
         "netconf-node-topology:host": "$DEVICE_IP",
@@ -95,7 +136,8 @@ DATA_TEMPLATE = string.Template('''{
         "netconf-node-topology:tcp-only": "false",
         "netconf-node-topology:keepalive-delay": 0
     }
-}''')
+}"""
+)
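
DATA_TEMPLATE is a string.Template, so the $DEVICE_NAME-style placeholders are
filled by substitute() with the subst_dict built in main() below. A standalone
illustration with a hypothetical mini-template:

    import string

    tpl = string.Template('{"node-id": "$DEVICE_NAME"}')  # hypothetical template
    print(tpl.substitute({"DEVICE_NAME": "sim-device-17830-0"}))
    # substitute() raises KeyError for a missing placeholder;
    # safe_substitute() would leave it in place instead.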
 
 
 def count_response(counter, response, method):
@@ -113,7 +155,12 @@ def sorted_repr(counter):
     for key_tuple in counter:
         short_counter[(key_tuple[0], key_tuple[1])] += counter[key_tuple]
     short_list = sorted(short_counter.keys())
-    short_text = ", ".join(["(" + item[0] + ":" + item[1] + ")x" + str(short_counter[item]) for item in short_list])
+    short_text = ", ".join(
+        [
+            "(" + item[0] + ":" + item[1] + ")x" + str(short_counter[item])
+            for item in short_list
+        ]
+    )
     long_text = "\n".join([item[2] for item in sorted(counter.keys(), reverse=True)])
     return short_text + "\nresponses:\n" + long_text
 
@@ -121,12 +168,16 @@ def sorted_repr(counter):
 def main():
     """Top-level logic to execute."""
     args = parse_arguments()
-    uri_part = "config/network-topology:network-topology/topology/topology-netconf/node/"
+    uri_part = (
+        "config/network-topology:network-topology/topology/topology-netconf/node/"
+    )
     put_headers = {"Content-Type": "application/json", "Accept": "application/json"}
     delete_headers = {"Accept": "application/json"}
     counter = collections.Counter()
 
-    def handle_sigint(received_signal, frame):  # This is a closure as it refers to the counter.
+    def handle_sigint(
+        received_signal, frame
+    ):  # This is a closure as it refers to the counter.
         """Upon SIGINT, print counter contents and exit gracefully."""
         signal.signal(signal.SIGINT, signal.SIG_DFL)
         print(sorted_repr(counter))
@@ -134,7 +185,12 @@ def main():
 
     signal.signal(signal.SIGINT, handle_sigint)
     session = AuthStandalone.Init_Session(
-        args.odladdress, args.restconfuser, args.restconfpassword, args.scope, args.reuse)
+        args.odladdress,
+        args.restconfuser,
+        args.restconfpassword,
+        args.scope,
+        args.reuse,
+    )
     subst_dict = {}
     subst_dict["DEVICE_IP"] = args.deviceaddress
     subst_dict["DEVICE_USER"] = args.deviceuser
@@ -148,14 +204,18 @@ def main():
         while port < wrap_port:
             if len(delayed) > args.disconndelay:
                 delete_name = delayed.popleft()
-                response = AuthStandalone.Delete_Using_Session(session, uri_part + delete_name, headers=delete_headers)
+                response = AuthStandalone.Delete_Using_Session(
+                    session, uri_part + delete_name, headers=delete_headers
+                )
                 count_response(counter, response, "delete")
             put_name = args.basename + "-" + str(port) + "-" + str(iteration)
             subst_dict["DEVICE_NAME"] = put_name
             subst_dict["DEVICE_PORT"] = str(port)
             put_data = DATA_TEMPLATE.substitute(subst_dict)
             uri = uri_part + put_name
-            response = AuthStandalone.Put_Using_Session(session, uri, data=put_data, headers=put_headers)
+            response = AuthStandalone.Put_Using_Session(
+                session, uri, data=put_data, headers=put_headers
+            )
             count_response(counter, response, "put")
             delayed.append(put_name)  # schedule for deconfiguration unconditionally
             time.sleep(args.connsleep)
index 3655800410563cd904bf159e08163074b7fefc40..f88c1010521b74f6e56ebbddf03f92349af3dec7 100644 (file)
@@ -45,36 +45,54 @@ def parse_arguments():
     parser = argparse.ArgumentParser()
 
     # Netconf and Restconf related arguments.
-    parser.add_argument('--odladdress', default='127.0.0.1',
-                        help='IP address of ODL Restconf to be used')
-    parser.add_argument('--restconfport', default='8181',
-                        help='Port on which ODL Restconf to be used')
-    parser.add_argument('--user', default='admin',
-                        help='Username for ODL Restconf authentication')
-    parser.add_argument('--password', default='admin',
-                        help='Password for ODL Restconf authentication')
-    parser.add_argument('--scope',
-                        help='Scope for ODL Restconf authentication')
-    parser.add_argument('--count', type=int,
-                        help='Count of devices to query')
-    parser.add_argument('--name',
-                        help='Name of device without the ID suffix')
-    parser.add_argument('--reuse', default='True', type=str2bool,
-                        help='Should single requests session be re-used')
+    parser.add_argument(
+        "--odladdress",
+        default="127.0.0.1",
+        help="IP address of ODL Restconf to be used",
+    )
+    parser.add_argument(
+        "--restconfport", default="8181", help="Port on which ODL Restconf to be used"
+    )
+    parser.add_argument(
+        "--user", default="admin", help="Username for ODL Restconf authentication"
+    )
+    parser.add_argument(
+        "--password", default="admin", help="Password for ODL Restconf authentication"
+    )
+    parser.add_argument("--scope", help="Scope for ODL Restconf authentication")
+    parser.add_argument("--count", type=int, help="Count of devices to query")
+    parser.add_argument("--name", help="Name of device without the ID suffix")
+    parser.add_argument(
+        "--reuse",
+        default="True",
+        type=str2bool,
+        help="Should single requests session be re-used",
+    )
 
     # Work related arguments.
-    parser.add_argument('--workers', default='1', type=int,
-                        help='number of blocking http threads to use')
-    parser.add_argument('--timeout', default='300', type=float,
-                        help='timeout in seconds for all jobs to complete')
-    parser.add_argument('--refresh', default='0.1', type=float,
-                        help='seconds to sleep in main thread if nothing to do')
+    parser.add_argument(
+        "--workers",
+        default="1",
+        type=int,
+        help="number of blocking http threads to use",
+    )
+    parser.add_argument(
+        "--timeout",
+        default="300",
+        type=float,
+        help="timeout in seconds for all jobs to complete",
+    )
+    parser.add_argument(
+        "--refresh",
+        default="0.1",
+        type=float,
+        help="seconds to sleep in main thread if nothing to do",
+    )
 
     return parser.parse_args()  # arguments are read
 
 
 class TRequestWithResponse(object):
-
     def __init__(self, uri, kwargs):
         self.uri = uri
         self.kwargs = kwargs
@@ -98,7 +116,9 @@ def queued_send(session, queue_messages):
         except IndexError:  # nothing more to send
             break
         start = time.time()
-        response = AuthStandalone.Get_Using_Session(session, request.uri, **request.kwargs)
+        response = AuthStandalone.Get_Using_Session(
+            session, request.uri, **request.kwargs
+        )
         stop = time.time()
         status = int(response.status_code)
         content = repr(response.content)
@@ -115,7 +135,7 @@ def collect_results(request_list, response_queue):
 
 def watch_for_timeout(timeout, response_queue):
     time.sleep(timeout)
-    response_queue.append((None, 'Time is up!'))
+    response_queue.append((None, "Time is up!"))
 
 
 def run_thread(thread_target, *thread_args):
@@ -129,11 +149,11 @@ def run_thread(thread_target, *thread_args):
 args = parse_arguments()
 
 # Construct the work for the workers.
-url_start = 'config/network-topology:network-topology/'
+url_start = "config/network-topology:network-topology/"
 url_start += "topology/topology-netconf/node/"
 url_start += args.name + "-"
 url_end = "/yang-ext:mount"
-headers = {'Content-Type': 'application/xml', "Accept": "application/xml"}
+headers = {"Content-Type": "application/xml", "Accept": "application/xml"}
 kwargs = {"headers": headers}
 requests = []
 for device_number in range(args.count):
@@ -154,7 +174,9 @@ for request in requests:
 # Spawn the workers, giving each a queue.
 threads = []
 for queue_messages in list_q_msg:
-    session = AuthStandalone.Init_Session(args.odladdress, args.user, args.password, args.scope, args.reuse)
+    session = AuthStandalone.Init_Session(
+        args.odladdress, args.user, args.password, args.scope, args.reuse
+    )
     thread = run_thread(queued_send, session, queue_messages)
     threads.append(thread)
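
run_thread, whose body falls outside this hunk, wraps the usual daemon-thread
idiom; a minimal version consistent with how it is called here, stated as an
assumption rather than the committed code:

    import threading

    def run_thread(thread_target, *thread_args):
        thread = threading.Thread(target=thread_target, args=thread_args)
        thread.daemon = True  # do not let stuck workers block interpreter exit
        thread.start()
        return thread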
 
index d2784da71ee2267a00f3142cc8d0d4ecf4ae2e2b..00ed0e70f57c6fd059877fd2947d05977a200edf 100755 (executable)
@@ -29,8 +29,9 @@ def generate_eids_random(base, n):
     """
     eids = []
     for i in range(0, n):
-        eids.append(str(netaddr.IPAddress(base) +
-                        random.randint(0, (n - 1) * increment)))
+        eids.append(
+            str(netaddr.IPAddress(base) + random.randint(0, (n - 1) * increment))
+        )
     return eids
 
 
@@ -57,12 +58,12 @@ def generate_map_request(eid):
     """
     sport1 = random.randint(60000, 65000)
     sport2 = random.randint(60000, 65000)
-    rnonce = random.randint(0, 2**63)
+    rnonce = random.randint(0, 2 ** 63)
 
     itr_rloc = [lisp.LISP_AFI_Address(address=src_rloc, afi=1)]
-    record = [lisp.LISP_MapRequestRecord(request_address=eid,
-                                         request_afi=1,
-                                         eid_mask_len=32)]
+    record = [
+        lisp.LISP_MapRequestRecord(request_address=eid, request_afi=1, eid_mask_len=32)
+    ]
 
     packet = lisp.Ether(dst=dst_mac, src=src_mac)
     packet /= lisp.IP(dst=dst_rloc, src=src_rloc)
@@ -70,10 +71,14 @@ def generate_map_request(eid):
     packet /= lisp.LISP_Encapsulated_Control_Message(ptype=8)
     packet /= lisp.IP(dst=eid, src=src_eid)
     packet /= lisp.UDP(sport=sport2, dport=4342)
-    packet /= lisp.LISP_MapRequest(nonce=rnonce, request_afi=1,
-                                   address=src_eid, ptype=1,
-                                   itr_rloc_records=itr_rloc,
-                                   request_records=record)
+    packet /= lisp.LISP_MapRequest(
+        nonce=rnonce,
+        request_afi=1,
+        address=src_eid,
+        ptype=1,
+        itr_rloc_records=itr_rloc,
+        request_records=record,
+    )
     return packet
 
 
@@ -87,59 +92,102 @@ def generate_map_register(eid, rloc, key_id):
         :return : returns a Scapy Map-Request packet object
     """
     sport1 = random.randint(60000, 65000)
-    rnonce = random.randint(0, 2**63)
-
-    rlocs = [lisp.LISP_Locator_Record(priority=1, weight=1,
-                                      multicast_priority=255,
-                                      multicast_weight=0,
-                                      reserved=0, locator_flags=5,
-                                      locator_afi=1, address=rloc)]
-
-    record = [lisp.LISP_MapRecord(record_ttl=1440, locator_count=1,
-                                  eid_prefix_length=32, action=0,
-                                  authoritative=1, reserved=0,
-                                  map_version_number=0, record_afi=1,
-                                  record_address=eid, locators=rlocs)]
+    rnonce = random.randint(0, 2 ** 63)
+
+    rlocs = [
+        lisp.LISP_Locator_Record(
+            priority=1,
+            weight=1,
+            multicast_priority=255,
+            multicast_weight=0,
+            reserved=0,
+            locator_flags=5,
+            locator_afi=1,
+            address=rloc,
+        )
+    ]
+
+    record = [
+        lisp.LISP_MapRecord(
+            record_ttl=1440,
+            locator_count=1,
+            eid_prefix_length=32,
+            action=0,
+            authoritative=1,
+            reserved=0,
+            map_version_number=0,
+            record_afi=1,
+            record_address=eid,
+            locators=rlocs,
+        )
+    ]
 
     packet = lisp.Ether(dst=dst_mac, src=src_mac)
     packet /= lisp.IP(dst=dst_rloc, src=src_rloc)
     packet /= lisp.UDP(sport=sport1, dport=4342)
-    packet /= lisp.LISP_MapRegister(ptype=3, nonce=rnonce,
-                                    register_flags=10,
-                                    additional_register_flags=1,
-                                    register_count=1,
-                                    key_id=key_id,
-                                    register_records=record,
-                                    xtr_id_low=netaddr.IPAddress(eid))
+    packet /= lisp.LISP_MapRegister(
+        ptype=3,
+        nonce=rnonce,
+        register_flags=10,
+        additional_register_flags=1,
+        register_count=1,
+        key_id=key_id,
+        register_records=record,
+        xtr_id_low=netaddr.IPAddress(eid),
+    )
     return packet
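
The two generators above return scapy packet objects; producing the trace file
this script is named for is typically done with scapy's wrpcap(). A hypothetical
usage sketch (the file name and count are assumptions, not the script's actual
tail):

    from scapy.utils import wrpcap

    packets = [generate_map_request(eid) for eid in generate_eids_random("10.0.0.0", 10)]
    wrpcap("map-requests.pcap", packets)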
 
 
-parser = argparse.ArgumentParser(description='Create a Map-Request trace file')
-
-parser.add_argument('--dst-mac', default='00:00:00:00:00:00',
-                    help='Map-Request destination MAC address \
-                        (default is 00:00:00:00:00:00)')
-parser.add_argument('--src-mac', default='00:00:00:00:00:00',
-                    help='Map-Request source MAC address \
-                        (default is 00:00:00:00:00:00)')
-parser.add_argument('--dst-rloc', default='127.0.0.1',
-                    help='Send Map-Request to the Map-Server with this RLOC \
-                        (default is 127.0.0.1)')
-parser.add_argument('--src-rloc', default='127.0.0.1',
-                    help='Send Map-Request with this source RLOC \
-                        (default is 127.0.0.1)')
-parser.add_argument('--src-eid', default='192.0.2.1',
-                    help='Send Map-Request with this source EID \
-                        (default is 192.0.2.1)')
-parser.add_argument('--base-eid', default='10.0.0.0',
-                    help='Start incrementing EID from this address \
-                        (default is 10.0.0.0)')
-parser.add_argument('--requests', type=int, default=1,
-                    help='Number of requests to create (default 1)')
-parser.add_argument('--increment', type=int, default=1,
-                    help='Increment EID requests (default 1)')
-parser.add_argument('--random', type=bool, default=False,
-                    help='Create random EID requests (default False)')
+parser = argparse.ArgumentParser(description="Create a Map-Request trace file")
+
+parser.add_argument(
+    "--dst-mac",
+    default="00:00:00:00:00:00",
+    help="Map-Request destination MAC address \
+                        (default is 00:00:00:00:00:00)",
+)
+parser.add_argument(
+    "--src-mac",
+    default="00:00:00:00:00:00",
+    help="Map-Request source MAC address \
+                        (default is 00:00:00:00:00:00)",
+)
+parser.add_argument(
+    "--dst-rloc",
+    default="127.0.0.1",
+    help="Send Map-Request to the Map-Server with this RLOC \
+                        (default is 127.0.0.1)",
+)
+parser.add_argument(
+    "--src-rloc",
+    default="127.0.0.1",
+    help="Send Map-Request with this source RLOC \
+                        (default is 127.0.0.1)",
+)
+parser.add_argument(
+    "--src-eid",
+    default="192.0.2.1",
+    help="Send Map-Request with this source EID \
+                        (default is 192.0.2.1)",
+)
+parser.add_argument(
+    "--base-eid",
+    default="10.0.0.0",
+    help="Start incrementing EID from this address \
+                        (default is 10.0.0.0)",
+)
+parser.add_argument(
+    "--requests", type=int, default=1, help="Number of requests to create (default 1)"
+)
+parser.add_argument(
+    "--increment", type=int, default=1, help="Increment EID requests (default 1)"
+)
+parser.add_argument(
+    "--random",
+    type=bool,
+    default=False,
+    help="Create random EID requests (default False)",
+)
 
 in_args = parser.parse_args()
 dst_mac = in_args.dst_mac
index fb7e7587055ff35b8f0b2cc461522533cf0bdc72..2e05dac5d5d2bd7a3ddec4d80c585bca3c078918 100755 (executable)
@@ -24,54 +24,46 @@ __version__ = "0.0.3"
 
 
 class MappingRPCBlaster(object):
-    putheaders = {'Content-type': 'application/json'}
-    getheaders = {'Accept': 'application/json'}
+    putheaders = {"Content-type": "application/json"}
+    getheaders = {"Accept": "application/json"}
 
-    RPC_URL_LI = 'restconf/operations/lfm-mapping-database:'
-    RPC_URL_BE = 'restconf/operations/odl-mappingservice:'
+    RPC_URL_LI = "restconf/operations/lfm-mapping-database:"
+    RPC_URL_BE = "restconf/operations/odl-mappingservice:"
     TIMEOUT = 10
 
     # Template for adding mappings
     add_mapping_template = {
-        u'input': {
-            u'mapping-record': {
-                u'recordTtl': 60,
-                u'action': u'NoAction',
-                u'authoritative': True,
-                u'eid': {
-                    u'address-type':
-                        u'ietf-lisp-address-types:ipv4-prefix-afi',
-                    u'ipv4-prefix': u'10.0.0.0/32'
+        u"input": {
+            u"mapping-record": {
+                u"recordTtl": 60,
+                u"action": u"NoAction",
+                u"authoritative": True,
+                u"eid": {
+                    u"address-type": u"ietf-lisp-address-types:ipv4-prefix-afi",
+                    u"ipv4-prefix": u"10.0.0.0/32",
                 },
-                u'LocatorRecord': [
+                u"LocatorRecord": [
                     {
-                        u'locator-id': u'ipv4:172.16.0.0',
-                        u'priority': 1,
-                        u'weight': 1,
-                        u'multicastPriority': 255,
-                        u'multicastWeight': 0,
-                        u'localLocator': True,
-                        u'rlocProbed': False,
-                        u'routed': True,
-                        u'rloc': {
-                            u'address-type':
-                                u'ietf-lisp-address-types:ipv4-afi',
-                            u'ipv4': u'172.16.0.0'
-                        }
+                        u"locator-id": u"ipv4:172.16.0.0",
+                        u"priority": 1,
+                        u"weight": 1,
+                        u"multicastPriority": 255,
+                        u"multicastWeight": 0,
+                        u"localLocator": True,
+                        u"rlocProbed": False,
+                        u"routed": True,
+                        u"rloc": {
+                            u"address-type": u"ietf-lisp-address-types:ipv4-afi",
+                            u"ipv4": u"172.16.0.0",
+                        },
                     }
-                ]
+                ],
             }
         }
     }
 
     # Template for getting mappings
-    get_mapping_template = {
-        u'input': {
-            u'eid': {
-                u'ipv4-prefix': u'10.0.0.0'
-            }
-        }
-    }
+    get_mapping_template = {u"input": {u"eid": {u"ipv4-prefix": u"10.0.0.0"}}}
 
     def __init__(self, host, port, start_eid, mask, start_rloc, nmappings, v):
         """
@@ -101,8 +93,7 @@ class MappingRPCBlaster(object):
             print("Using the Beryllium and later RPC URL")
             rpc_url = self.RPC_URL_BE
 
-        self.post_url_template = 'http://' + self.host + ':' \
-            + self.port + '/' + rpc_url
+        self.post_url_template = "http://" + self.host + ":" + self.port + "/" + rpc_url
 
     def mapping_from_tpl(self, eid, mask, rloc):
         """Create an add-mapping RPC input dictionary from the mapping template
@@ -113,14 +104,12 @@ class MappingRPCBlaster(object):
         Returns:
             :return dict: mapping - template modified with the arguments
         """
-        mapping = copy.deepcopy(
-            self.add_mapping_template['input']['mapping-record'])
-        mapping['eid']['ipv4-prefix'] = str(netaddr.IPAddress(eid)) \
-            + '/' + mask
-        mapping['LocatorRecord'][0]['locator-id'] = 'ipv4:' \
-            + str(netaddr.IPAddress(rloc))
-        mapping['LocatorRecord'][0]['rloc']['ipv4'] \
-            = str(netaddr.IPAddress(rloc))
+        mapping = copy.deepcopy(self.add_mapping_template["input"]["mapping-record"])
+        mapping["eid"]["ipv4-prefix"] = str(netaddr.IPAddress(eid)) + "/" + mask
+        mapping["LocatorRecord"][0]["locator-id"] = "ipv4:" + str(
+            netaddr.IPAddress(rloc)
+        )
+        mapping["LocatorRecord"][0]["rloc"]["ipv4"] = str(netaddr.IPAddress(rloc))
         return mapping
 
     def send_rpc(self, session, method, body):
@@ -133,9 +122,14 @@ class MappingRPCBlaster(object):
             :return int: status_code - HTTP status code
         """
         rpc_url = self.post_url_template + method
-        r = session.post(rpc_url, data=body, headers=self.putheaders,
-                         stream=False, auth=('admin', 'admin'),
-                         timeout=self.TIMEOUT)
+        r = session.post(
+            rpc_url,
+            data=body,
+            headers=self.putheaders,
+            stream=False,
+            auth=("admin", "admin"),
+            timeout=self.TIMEOUT,
+        )
         return r.status_code
 
     def add_n_mappings(self):
@@ -143,11 +137,11 @@ class MappingRPCBlaster(object):
         rpc = dict(self.add_mapping_template)
         increment = pow(2, 32 - int(self.mask))
         for i in range(self.nmappings):
-            rpc['input']['mapping-record'] = self.mapping_from_tpl(
-                self.start_eid + i * increment, self.mask,
-                self.start_rloc + i)
+            rpc["input"]["mapping-record"] = self.mapping_from_tpl(
+                self.start_eid + i * increment, self.mask, self.start_rloc + i
+            )
             rpc_json = json.dumps(rpc)
-            self.send_rpc(self.session, 'add-mapping', rpc_json)
+            self.send_rpc(self.session, "add-mapping", rpc_json)
         self.session.close()
 
     def get_n_mappings(self):
@@ -157,48 +151,80 @@ class MappingRPCBlaster(object):
         increment = pow(2, 32 - int(self.mask))
         for i in range(self.nmappings):
             eid = self.start_eid + i * increment
-            rpc['input']['eid']['ipv4-prefix'] = str(netaddr.IPAddress(eid)) \
-                + '/' + self.mask
+            rpc["input"]["eid"]["ipv4-prefix"] = (
+                str(netaddr.IPAddress(eid)) + "/" + self.mask
+            )
             rpc_json = json.dumps(rpc)
-            self.send_rpc(self.session, 'get-mapping', rpc_json)
+            self.send_rpc(self.session, "get-mapping", rpc_json)
         self.session.close()
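
A worked example of the stride computed above: the next EID prefix starts
pow(2, 32 - mask) addresses after the previous one, so a /24 mask steps
through consecutive /24 networks (standalone sketch, not part of the patch):

import netaddr

start_eid = int(netaddr.IPAddress("10.0.0.0"))
mask = "24"
increment = pow(2, 32 - int(mask))  # 256 addresses per /24 prefix
print([str(netaddr.IPAddress(start_eid + i * increment)) for i in range(3)])
# -> ['10.0.0.0', '10.0.1.0', '10.0.2.0']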
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Add simple IPv4 \
-        prefix-to-IPv4 locator LISP mappings to OpenDaylight')
-
-    parser.add_argument('--mode', default='add',
-                        help='Operating mode, can be "add" or "get" \
-                            (default is "add")')
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where ODL controller is running (default \
-                            is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which ODL\'s RESTCONF is listening \
-                            (default is 8181)')
-    parser.add_argument('--start-eid', default='10.0.0.0',
-                        help='Start incrementing EID from this address \
-                            (default is 10.0.0.0)')
-    parser.add_argument('--mask', default='32',
-                        help='Network mask for the IPv4 EID prefixes \
-                            (default is 32)')
-    parser.add_argument('--start-rloc', default='172.16.0.0',
-                        help='Start incrementing RLOC from this address \
-                            (default is 172.16.0.0, ignored for "get")')
-    parser.add_argument('--mappings', type=int, default=1,
-                        help='Number of mappings to add/get (default 1)')
-    parser.add_argument('--odl-version', default='Be',
-                        help='OpenDaylight version, can be "Li" or "Be" \
-                            (default is "Be")')
+    parser = argparse.ArgumentParser(
+        description="Add simple IPv4 \
+        prefix-to-IPv4 locator LISP mappings to OpenDaylight"
+    )
+
+    parser.add_argument(
+        "--mode",
+        default="add",
+        help='Operating mode, can be "add" or "get" \
+                            (default is "add")',
+    )
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where ODL controller is running (default \
+                            is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which ODL's RESTCONF is listening \
+                            (default is 8181)",
+    )
+    parser.add_argument(
+        "--start-eid",
+        default="10.0.0.0",
+        help="Start incrementing EID from this address \
+                            (default is 10.0.0.0)",
+    )
+    parser.add_argument(
+        "--mask",
+        default="32",
+        help="Network mask for the IPv4 EID prefixes \
+                            (default is 32)",
+    )
+    parser.add_argument(
+        "--start-rloc",
+        default="172.16.0.0",
+        help='Start incrementing RLOC from this address \
+                            (default is 172.16.0.0, ignored for "get")',
+    )
+    parser.add_argument(
+        "--mappings",
+        type=int,
+        default=1,
+        help="Number of mappings to add/get (default 1)",
+    )
+    parser.add_argument(
+        "--odl-version",
+        default="Be",
+        help='OpenDaylight version, can be "Li" or "Be" \
+                            (default is "Be")',
+    )
 
     in_args = parser.parse_args()
 
-    mapping_rpc_blaster = MappingRPCBlaster(in_args.host, in_args.port,
-                                            in_args.start_eid, in_args.mask,
-                                            in_args.start_rloc,
-                                            in_args.mappings,
-                                            in_args.odl_version)
+    mapping_rpc_blaster = MappingRPCBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.start_eid,
+        in_args.mask,
+        in_args.start_rloc,
+        in_args.mappings,
+        in_args.odl_version,
+    )
 
     if in_args.mode == "add":
         mapping_rpc_blaster.add_n_mappings()
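
The blaster can also be driven without the CLI; a sketch assuming the file is
importable as a module named mapping_blaster (the file name is not visible in
this hunk), with the constructor call mirroring the __main__ block above:

from mapping_blaster import MappingRPCBlaster  # hypothetical module name

blaster = MappingRPCBlaster(
    "127.0.0.1", "8181", "10.0.0.0", "32", "172.16.0.0", 10, "Be"
)
blaster.add_n_mappings()  # one add-mapping RPC per mapping, then the session closes
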
index 25344c24442dd5d1576e6058621c11611703e113..0de6f2d823b1974d0022a51720ac133d513aae9b 100644 (file)
@@ -14,9 +14,12 @@ def addCar(numberOfCars):
     for x in range(1, numberOfCars + 1):
         strId = str(x)
         payload = settings.add_car_payload_template.substitute(
-            id=strId, category="category" + strId, model="model" + strId,
+            id=strId,
+            category="category" + strId,
+            model="model" + strId,
             manufacturer="manufacturer" + strId,
-            year=(2000 + x % 100))
+            year=(2000 + x % 100),
+        )
         print("payload formed after template substitution=")
         print(payload)
         # Send the POST request
@@ -42,16 +45,19 @@ def addPerson(numberOfPersons):
     if numberOfPersons == 0:
         strId = str(numberOfPersons)
         payload = settings.add_person_payload_template.substitute(
-            personId="user" + strId, gender="unknown", age=0,
+            personId="user" + strId,
+            gender="unknown",
+            age=0,
             address=strId + "Way, Some Country, Some Zip  " + strId,
-            contactNo="some number" + strId)
+            contactNo="some number" + strId,
+        )
         # Send the POST request using RESTCONF
         resp = util.nonprintpost(settings.getAddPersonUrl(), "admin", "admin", payload)
         return
 
     genderToggle = "Male"
     for x in range(1, numberOfPersons + 1):
-        if(genderToggle == "Male"):
+        if genderToggle == "Male":
             genderToggle = "Female"
         else:
             genderToggle = "Male"
@@ -59,9 +65,12 @@ def addPerson(numberOfPersons):
         strId = str(x)
 
         payload = settings.add_person_rpc_payload_template.substitute(
-            personId="user" + strId, gender=genderToggle, age=(20 + x % 100),
+            personId="user" + strId,
+            gender=genderToggle,
+            age=(20 + x % 100),
             address=strId + "Way, Some Country, Some Zip  " + str(x % 1000),
-            contactNo="some number" + strId)
+            contactNo="some number" + strId,
+        )
         # Send the POST request using RPC
         resp = util.post(settings.getAddPersonRpcUrl(), "admin", "admin", payload)
 
@@ -88,16 +97,21 @@ def addCarPerson(numberOfCarPersons):
     # FOR RPC TO WORK PROPERLY THE FIRST ENTRY SHOULD BE VIA RESTCONF
     if numberOfCarPersons == 0:
         payload = settings.add_car_person_template.substitute(
-            Id=str(numberOfCarPersons), personId="user" + str(numberOfCarPersons))
+            Id=str(numberOfCarPersons), personId="user" + str(numberOfCarPersons)
+        )
         # Send the POST request REST CONF
-        resp = util.nonprintpost(settings.getAddCarPersonUrl(), "admin", "admin", payload)
+        resp = util.nonprintpost(
+            settings.getAddCarPersonUrl(), "admin", "admin", payload
+        )
 
         return
 
     for x in range(1, numberOfCarPersons + 1):
         strId = str(x)
 
-        payload = settings.add_car_person_template.substitute(Id=strId, personId="user" + strId)
+        payload = settings.add_car_person_template.substitute(
+            Id=strId, personId="user" + strId
+        )
 
         # Send the POST request REST CONF
         resp = util.post(settings.getAddCarPersonUrl(), "admin", "admin", payload)
@@ -125,7 +139,9 @@ def buyCar(numberOfCarBuyers):
     for x in range(1, numberOfCarBuyers + 1):
         strId = str(x)
 
-        payload = settings.buy_car_rpc_template.substitute(personId="user" + strId, carId=strId)
+        payload = settings.buy_car_rpc_template.substitute(
+            personId="user" + strId, carId=strId
+        )
 
         # Send the POST request using RPC
         resp = util.post(settings.getBuyCarRpcUrl(), "admin", "admin", payload)
@@ -191,19 +207,25 @@ def deleteAllPersons(ignore):
 # Usage message shown to user
 #
 
-def options():
-
-    command = 'ac=Add Car\n\t\tap=Add Person \n\t\tbc=Buy Car\n\t\tgc=Get Cars\n\t\tgp=Get Persons\n\t\t' \
-              'gcp=Get Car-Person Mappings\n\t\tdc=Delete All Cars\n\t\tdp=Delete All Persons)'
 
-    param = '\n\t<param> is\n\t\t' \
-            'number of cars to be added if <command>=ac\n\t\t' \
-            'number of persons to be added if <command>=ap\n\t\t' \
-            'number of car buyers if <command>=bc\n\t\t'\
-            'pass 0 if <command>=gc or gp or gcp or dc or dp'\
+def options():
 
-    usageString = 'usage: python crud <ipaddress> <command> <param>\nwhere\n\t<ipaddress> = ODL server ip address' \
-                  '\n\t<command> = any of the following commands \n\t\t'
+    command = (
+        "ac=Add Car\n\t\tap=Add Person \n\t\tbc=Buy Car\n\t\tgc=Get Cars\n\t\tgp=Get Persons\n\t\t"
+        "gcp=Get Car-Person Mappings\n\t\tdc=Delete All Cars\n\t\tdp=Delete All Persons)"
+    )
+
+    param = (
+        "\n\t<param> is\n\t\t"
+        "number of cars to be added if <command>=ac\n\t\t"
+        "number of persons to be added if <command>=ap\n\t\t"
+        "number of car buyers if <command>=bc\n\t\t"
+        "pass 0 if <command>=gc or gp or gcp or dc or dp"
+    )
+    usageString = (
+        "usage: python crud <ipaddress> <command> <param>\nwhere\n\t<ipaddress> = ODL server ip address"
+        "\n\t<command> = any of the following commands \n\t\t"
+    )
 
     usageString = usageString + command + param
 
@@ -214,14 +236,23 @@ def options():
 # entry point for command executions
 #
 
+
 def main():
     if len(sys.argv) < 4:
         options()
         quit(0)
     settings.hostname = sys.argv[1]
-    settings.port = '8080'
-    call = dict(ac=addCar, ap=addPerson, bc=buyCar,
-                gc=getCars, gp=getPersons, gcp=getCarPersonMappings, dc=deleteAllCars, dp=deleteAllPersons)
+    settings.port = "8080"
+    call = dict(
+        ac=addCar,
+        ap=addPerson,
+        bc=buyCar,
+        gc=getCars,
+        gp=getPersons,
+        gcp=getCarPersonMappings,
+        dc=deleteAllCars,
+        dp=deleteAllPersons,
+    )
 
     # FOR RPC TO WORK PROPERLY THE FIRST PERSON SHOULD BE ADDED VIA RESTCONF
     addPerson(0)
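
The call dict built in main() replaces an if/elif chain with table dispatch; a
self-contained sketch of the same pattern with toy handlers:

def add_car(n):
    return "added %d cars" % n

def get_cars(n):
    return "fetched cars"

call = dict(ac=add_car, gc=get_cars)
print(call["ac"](5))  # main() looks up the <command> argument the same way
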
index 053cde70e47b83ab2bd7034b260c2e9b14e4807d..c87bcf8a91bacd22de0c6bcfcb9edc4ae9774c14 100644 (file)
@@ -69,7 +69,8 @@ add_car_payload_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for Person resource payload
 add_person_payload_template = Template(
@@ -85,7 +86,8 @@ add_person_payload_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for Car Person mapping  payload
 add_car_person_template = Template(
@@ -98,7 +100,8 @@ add_car_person_template = Template(
             }
         ]
     }}
-    """)
+    """
+)
 
 # Template for adding person using RPC
 add_person_rpc_payload_template = Template(
@@ -113,7 +116,8 @@ add_person_rpc_payload_template = Template(
             "people:age":"$age"
         }
     }
-    """)
+    """
+)
 
 # Template for buying car rpc
 buy_car_rpc_template = Template(
@@ -126,4 +130,5 @@ buy_car_rpc_template = Template(
             "car-purchase:car-id" : "$carId"
         }
     }
-    """)
+    """
+)
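
These payloads are string.Template bodies whose $-placeholders are filled by
the crud script's substitute() calls earlier in this patch; a condensed
stand-in (the real template text is longer):

from string import Template

tpl = Template('{"person-id": "$personId", "car-id": "$carId"}')  # stand-in body
print(tpl.substitute(personId="user1", carId="1"))
# -> {"person-id": "user1", "car-id": "1"}
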
index e1667dc48d9bee41d239a10ec6a01a161d55c696..2b1751937f368380af4db559db7007364d9f330b 100644 (file)
@@ -10,7 +10,7 @@ __email__ = "syedbahm@cisco.com"
 def get(url, userId, password):
     """Helps in making GET REST calls"""
     headers = {}
-    headers['Accept'] = 'application/xml'
+    headers["Accept"] = "application/xml"
 
     # Send the GET request
     req = requests.get(url, None, headers)
@@ -22,7 +22,7 @@ def get(url, userId, password):
 def nonprintpost(url, userId, password, data):
     """Helps in making POST REST calls without outputs"""
     headers = {}
-    headers['Content-Type'] = 'application/json'
+    headers["Content-Type"] = "application/json"
     # headers['Accept']= 'application/xml'
 
     resp = requests.post(url, data.encode(), headers=headers)
@@ -35,7 +35,7 @@ def post(url, userId, password, data):
     print("post request with url " + url)
     print("post request with data " + data)
     headers = {}
-    headers['Content-Type'] = 'application/json'
+    headers["Content-Type"] = "application/json"
     # headers['Accept']= 'application/xml'
 
     resp = requests.post(url, data.encode(), headers=headers)
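
Note for callers: the userId/password parameters of these helpers are accepted
for interface symmetry but never attached to the request. An authenticated
variant would need to pass them through, along the lines of this one-line
sketch (an assumption, not existing code):

resp = requests.post(url, data.encode(), headers=headers, auth=(userId, password))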
index 188f6437c57822d58f8c40e453f1f5edb9f96c21..13a84912d5e7818b91c5bff24863cde15c3bac17 100755 (executable)
@@ -10,13 +10,13 @@ __license__ = "New-style BSD"
 __email__ = "jmedved@cisco.com"
 
 
-getheaders = {'Accept': 'application/json'}
+getheaders = {"Accept": "application/json"}
 
 
 def cleanup_config_fl(host, port):
     global getheaders
 
-    url = 'http://' + host + ":" + port + '/wm/staticflowentrypusher/clear/all/json'
+    url = "http://" + host + ":" + port + "/wm/staticflowentrypusher/clear/all/json"
     r = requests.get(url, headers=getheaders)
     return r.status_code
 
@@ -24,42 +24,59 @@ def cleanup_config_fl(host, port):
 def cleanup_config_odl(host, port, auth):
     global getheaders
 
-    url = 'http://' + host + ":" + port + '/restconf/config/opendaylight-inventory:nodes'
+    url = (
+        "http://" + host + ":" + port + "/restconf/config/opendaylight-inventory:nodes"
+    )
 
     if not auth:
         r = requests.delete(url, headers=getheaders)
     else:
-        r = requests.delete(url, headers=getheaders, auth=('admin', 'admin'))
+        r = requests.delete(url, headers=getheaders, auth=("admin", "admin"))
 
     return r.status_code
 
 
 if __name__ == "__main__":
 
-    parser = argparse.ArgumentParser(description='Cleans up the config space')
-    parser.add_argument('--host', default='127.0.0.1', help='host where '
-                        'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181', help='port on '
-                        'which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--auth', dest='auth', action='store_true', default=False,
-                        help="Use authenticated access to REST "
-                        "(username: 'admin', password: 'admin').")
-    parser.add_argument('--controller', choices=['odl', 'floodlight'], default='odl',
-                        help='Controller type (ODL or Floodlight); default odl (OpenDaylight)')
+    parser = argparse.ArgumentParser(description="Cleans up the config space")
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="host where " "odl controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="port on " "which odl's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        default=False,
+        help="Use authenticated access to REST "
+        "(username: 'admin', password: 'admin').",
+    )
+    parser.add_argument(
+        "--controller",
+        choices=["odl", "floodlight"],
+        default="odl",
+        help="Controller type (ODL or Floodlight); default odl (OpenDaylight)",
+    )
 
     in_args = parser.parse_args()
 
-    if in_args.controller == 'odl':
+    if in_args.controller == "odl":
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
         exp = 200
-    elif in_args.controller == 'floodlight':
+    elif in_args.controller == "floodlight":
         sts = cleanup_config_fl(in_args.host, in_args.port)
         exp = 204
     else:
-        print('Unknown controller type')
+        print("Unknown controller type")
         sys.exit(-1)
 
     if sts != exp:
-        print('Failed to delete nodes in the config space, code %d' % sts)
+        print("Failed to delete nodes in the config space, code %d" % sts)
     else:
-        print('Nodes in config space deleted.')
+        print("Nodes in config space deleted.")
index 154a89f3ed410a67a6c9bff223b6a7c3d4080d26..8ba19c144a55adecd8d70bdd4ede8c556ca95a2a 100644 (file)
@@ -8,27 +8,29 @@ text_file.close()
 rate = []
 time = []
 
-pat_rate = re.compile(r'Avg. requests/s: (?P<rate1>[0-9,\.]+) OK, (?P<rate2>[0-9,\.]+) Total')
-pat_time = re.compile(r'Stats collected in (?P<time1>[0-9,\.]+) seconds')
+pat_rate = re.compile(
+    r"Avg. requests/s: (?P<rate1>[0-9,\.]+) OK, (?P<rate2>[0-9,\.]+) Total"
+)
+pat_time = re.compile(r"Stats collected in (?P<time1>[0-9,\.]+) seconds")
 
 for line in log.splitlines():
     res = pat_rate.search(line)
     if res is not None:
-        rate.append(res.groups('rate1')[0])
+        rate.append(res.groups("rate1")[0])
 print(rate)
 
 for line in log.splitlines():
     res = pat_time.search(line)
     if res is not None:
-        time.append(res.groups('time1')[0])
+        time.append(res.groups("time1")[0])
 print(time)
 
 text_file = open("rates.csv", "w")
-text_file.write('Add,Delete\n')
-text_file.write('{0},{1}\n'.format(rate[0], rate[1]))
+text_file.write("Add,Delete\n")
+text_file.write("{0},{1}\n".format(rate[0], rate[1]))
 text_file.close()
 
 text_file = open("times.csv", "w")
-text_file.write('Add,Delete\n')
-text_file.write('{0},{1}\n'.format(time[0], time[1]))
+text_file.write("Add,Delete\n")
+text_file.write("{0},{1}\n".format(time[0], time[1]))
 text_file.close()
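
The rate pattern above matches the summary lines printed by the flow blaster
later in this patch; a standalone check (the sample line is illustrative):

import re

pat_rate = re.compile(
    r"Avg. requests/s: (?P<rate1>[0-9,\.]+) OK, (?P<rate2>[0-9,\.]+) Total"
)
m = pat_rate.search("    Avg. requests/s: 1522.63 OK, 1600.00 Total (95.17% of peak total)")
print(m.group("rate1"), m.group("rate2"))  # -> 1522.63 1600.00
# The script uses m.groups("rate1")[0], which works only because rate1 happens
# to be group 1; m.group("rate1") is the direct accessor.
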
index 57bb31ddaff9ae3cefe8eacb326beb98f00a51a8..0f319e442b00340d75067d529e760f6dfe0fe47e 100755 (executable)
@@ -43,21 +43,23 @@ def wait_for_stats(crawler, exp_found, timeout, delay):
     :return: None
     """
     total_delay = 0
-    print('Waiting for stats to catch up:')
+    print("Waiting for stats to catch up:")
 
     with Timer() as t:
         while True:
             crawler.crawl_inventory()
-            print('   %d, %d' % (crawler.reported_flows, crawler.found_flows))
+            print("   %d, %d" % (crawler.reported_flows, crawler.found_flows))
             if crawler.found_flows == exp_found or total_delay > timeout:
                 break
             total_delay += delay
             time.sleep(delay)
 
     if total_delay < timeout:
-        print('Stats collected in %d seconds.' % t.secs)
+        print("Stats collected in %d seconds." % t.secs)
     else:
-        print('Stats collection did not finish in %d seconds. Aborting...' % total_delay)
+        print(
+            "Stats collection did not finish in %d seconds. Aborting..." % total_delay
+        )
 
 
 if __name__ == "__main__":
@@ -76,71 +78,131 @@ if __name__ == "__main__":
     #     flow_config_blaster 'delete' method)
     ############################################################################
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--cycles', type=int, default=1,
-                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
-                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
-                             'ends when all threads finish. Another cycle is started when the previous cycle finished.')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
-                             'default 10')
-    parser.add_argument('--fpr', type=int, default=1,
-                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
-                             'default 1')
-    parser.add_argument('--delay', type=int, default=2,
-                        help='Time (seconds) to between inventory polls when waiting for stats to catch up; default=1')
-    parser.add_argument('--timeout', type=int, default=100,
-                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
-    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
-                        help='Delete all added flows one by one, benchmark delete '
-                             'performance.')
-    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
-                        help='Delete all flows in bulk; default=False')
-    parser.add_argument('--auth', dest='auth', action='store_true',
-                        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False")
-    parser.add_argument('--startflow', type=int, default=0,
-                        help='The starting Flow ID; default=0')
-    parser.add_argument('--file', default='',
-                        help='File from which to read the JSON flow template; default: no file, use a built in '
-                             'template.')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--cycles",
+        type=int,
+        default=1,
+        help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+        "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+        "ends when all threads finish. Another cycle is started when the previous cycle finished.",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+        "default 10",
+    )
+    parser.add_argument(
+        "--fpr",
+        type=int,
+        default=1,
+        help="Flows-per-Request - number of flows (batch size) sent in each HTTP request; "
+        "default 1",
+    )
+    parser.add_argument(
+        "--delay",
+        type=int,
+        default=2,
+        help="Time (seconds) to between inventory polls when waiting for stats to catch up; default=1",
+    )
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=100,
+        help="The maximum time (seconds) to wait between the add and delete cycles; default=100",
+    )
+    parser.add_argument(
+        "--delete",
+        dest="delete",
+        action="store_true",
+        default=True,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    parser.add_argument(
+        "--bulk-delete",
+        dest="bulk_delete",
+        action="store_true",
+        default=False,
+        help="Delete all flows in bulk; default=False",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False",
+    )
+    parser.add_argument(
+        "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+    )
+    parser.add_argument(
+        "--file",
+        default="",
+        help="File from which to read the JSON flow template; default: no file, use a built in "
+        "template.",
+    )
 
     in_args = parser.parse_args()
 
     # Initialize
-    if in_args.file != '':
+    if in_args.file != "":
         flow_template = get_json_from_file(in_args.file)
     else:
         flow_template = None
 
-    ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
-
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
-                            16, in_args.flows, in_args.startflow, in_args.auth)
+    ic = InventoryCrawler(
+        in_args.host, in_args.port, 0, "operational", in_args.auth, False
+    )
+
+    fct = FlowConfigBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.fpr,
+        16,
+        in_args.flows,
+        in_args.startflow,
+        in_args.auth,
+    )
     # Get the baseline stats. Required in Step 3 to validate if the delete
     # function gets the controller back to the baseline
     ic.crawl_inventory()
     reported = ic.reported_flows
     found = ic.found_flows
 
-    print('Baseline:')
-    print('   Reported flows: %d' % reported)
-    print('   Found flows:    %d' % found)
+    print("Baseline:")
+    print("   Reported flows: %d" % reported)
+    print("   Found flows:    %d" % found)
 
     # Run through <CYCLES> add cycles, where <THREADS> threads are started in
     # each cycle and <FLOWS> flows are added from each thread
     fct.add_blaster()
 
-    print('\n*** Total flows added: %d' % fct.get_ok_flows())
-    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
+    print("\n*** Total flows added: %d" % fct.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fct.get_ok_rqsts())
 
     # Wait for stats to catch up
     wait_for_stats(ic, found + fct.get_ok_flows(), in_args.timeout, in_args.delay)
@@ -149,17 +211,17 @@ if __name__ == "__main__":
     # in each cycle and <FLOWS> flows previously added in an add cycle are
     # deleted in each thread
     if in_args.bulk_delete:
-        print('\nDeleting all flows in bulk:')
+        print("\nDeleting all flows in bulk:")
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
         if sts != 200:
-            print('   Failed to delete flows, code %d' % sts)
+            print("   Failed to delete flows, code %d" % sts)
         else:
-            print('   All flows deleted.')
+            print("   All flows deleted.")
     else:
-        print('\nDeleting flows one by one\n   ',)
+        print("\nDeleting flows one by one\n   ")
         fct.delete_blaster()
-        print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
-        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
+        print("\n*** Total flows deleted: %d" % fct.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fct.get_ok_rqsts())
 
     # Wait for stats to catch up back to baseline
     wait_for_stats(ic, found, in_args.timeout, in_args.delay)
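
Structurally, wait_for_stats() is a poll-until-converged loop; a distilled
sketch of the same control flow with a stubbed poll function:

import time

def wait_until(poll, expected, timeout, delay):
    waited = 0
    while poll() != expected and waited <= timeout:
        time.sleep(delay)
        waited += delay
    return waited <= timeout  # False means the wait was abandoned

print(wait_until(lambda: 42, expected=42, timeout=10, delay=2))  # -> True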
index a07748150cbe768f743b9e537069725ef9f71f87..62844f7ee62d820d5627a2cf49225f3a25686875 100755 (executable)
@@ -50,53 +50,46 @@ class Timer(object):
 
 
 class FlowConfigBlaster(object):
-    putheaders = {'content-type': 'application/json'}
-    getheaders = {'Accept': 'application/json'}
+    putheaders = {"content-type": "application/json"}
+    getheaders = {"Accept": "application/json"}
 
-    FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    FLWURL = (
+        "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    )
     TBLURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0"
-    INVURL = 'restconf/operational/opendaylight-inventory:nodes'
+    INVURL = "restconf/operational/opendaylight-inventory:nodes"
     TIMEOUT = 10
 
     flows = {}
 
     # The "built-in" flow template
     flow_mode_template = {
-        u'flow': [
+        u"flow": [
             {
-                u'hard-timeout': 65000,
-                u'idle-timeout': 65000,
-                u'cookie_mask': 4294967295,
-                u'flow-name': u'FLOW-NAME-TEMPLATE',
-                u'priority': 2,
-                u'strict': False,
-                u'cookie': 0,
-                u'table_id': 0,
-                u'installHw': False,
-                u'id': u'FLOW-ID-TEMPLATE',
-                u'match': {
-                    u'ipv4-destination': u'0.0.0.0/32',
-                    u'ethernet-match': {
-                        u'ethernet-type': {
-                            u'type': 2048
-                        }
-                    }
+                u"hard-timeout": 65000,
+                u"idle-timeout": 65000,
+                u"cookie_mask": 4294967295,
+                u"flow-name": u"FLOW-NAME-TEMPLATE",
+                u"priority": 2,
+                u"strict": False,
+                u"cookie": 0,
+                u"table_id": 0,
+                u"installHw": False,
+                u"id": u"FLOW-ID-TEMPLATE",
+                u"match": {
+                    u"ipv4-destination": u"0.0.0.0/32",
+                    u"ethernet-match": {u"ethernet-type": {u"type": 2048}},
                 },
-                u'instructions': {
-                    u'instruction': [
+                u"instructions": {
+                    u"instruction": [
                         {
-                            u'order': 0,
-                            u'apply-actions': {
-                                u'action': [
-                                    {
-                                        u'drop-action': {},
-                                        u'order': 0
-                                    }
-                                ]
-                            }
+                            u"order": 0,
+                            u"apply-actions": {
+                                u"action": [{u"drop-action": {}, u"order": 0}]
+                            },
                         }
                     ]
-                }
+                },
             }
         ]
     }
@@ -177,7 +170,19 @@ class FlowConfigBlaster(object):
         def get_total_flows(self):
             return self.total_flows.value
 
-    def __init__(self, host, port, ncycles, nthreads, fpr, nnodes, nflows, startflow, auth, flow_mod_template=None):
+    def __init__(
+        self,
+        host,
+        port,
+        ncycles,
+        nthreads,
+        fpr,
+        nnodes,
+        nflows,
+        startflow,
+        auth,
+        flow_mod_template=None,
+    ):
         self.host = host
         self.port = port
         self.ncycles = ncycles
@@ -191,14 +196,14 @@ class FlowConfigBlaster(object):
         if flow_mod_template:
             self.flow_mode_template = flow_mod_template
 
-        self.post_url_template = 'http://%s:' + self.port + '/' + self.TBLURL
-        self.del_url_template = 'http://%s:' + self.port + '/' + self.FLWURL
+        self.post_url_template = "http://%s:" + self.port + "/" + self.TBLURL
+        self.del_url_template = "http://%s:" + self.port + "/" + self.FLWURL
 
         self.stats = self.FcbStats()
         self.total_ok_flows = 0
         self.total_ok_rqsts = 0
 
-        self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
+        self.ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")) + startflow)
 
         self.print_lock = threading.Lock()
         self.cond = threading.Condition()
@@ -217,21 +222,31 @@ class FlowConfigBlaster(object):
         """
         hosts = self.host.split(",")
         host = hosts[0]
-        inventory_url = 'http://' + host + ":" + self.port + '/' + self.INVURL
+        inventory_url = "http://" + host + ":" + self.port + "/" + self.INVURL
         nodes = self.nnodes
 
         if not self.auth:
-            r = session.get(inventory_url, headers=self.getheaders, stream=False, timeout=self.TIMEOUT)
+            r = session.get(
+                inventory_url,
+                headers=self.getheaders,
+                stream=False,
+                timeout=self.TIMEOUT,
+            )
         else:
-            r = session.get(inventory_url, headers=self.getheaders, stream=False, auth=('admin', 'admin'),
-                            timeout=self.TIMEOUT)
+            r = session.get(
+                inventory_url,
+                headers=self.getheaders,
+                stream=False,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         if r.status_code == 200:
             try:
-                inv = json.loads(r.content)['nodes']['node']
+                inv = json.loads(r.content)["nodes"]["node"]
                 nn = 0
                 for n in range(len(inv)):
-                    if re.search('openflow', inv[n]['id']) is not None:
+                    if re.search("openflow", inv[n]["id"]) is not None:
                         nn += 1
                 if nn != 0:
                     nodes = nn
@@ -255,11 +270,11 @@ class FlowConfigBlaster(object):
         Returns: The flow that has been created from the template
 
         """
-        flow = copy.deepcopy(self.flow_mode_template['flow'][0])
-        flow['cookie'] = flow_id
-        flow['flow-name'] = self.create_flow_name(flow_id)
-        flow['id'] = str(flow_id)
-        flow['match']['ipv4-destination'] = '%s/32' % str(netaddr.IPAddress(ipaddr))
+        flow = copy.deepcopy(self.flow_mode_template["flow"][0])
+        flow["cookie"] = flow_id
+        flow["flow-name"] = self.create_flow_name(flow_id)
+        flow["id"] = str(flow_id)
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ipaddr))
         return flow
 
     def post_flows(self, session, node, flow_list, flow_count):
@@ -279,10 +294,22 @@ class FlowConfigBlaster(object):
         flow_url = self.assemble_post_url(host, node)
 
         if not self.auth:
-            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, timeout=self.TIMEOUT)
+            r = session.post(
+                flow_url,
+                data=flow_data,
+                headers=self.putheaders,
+                stream=False,
+                timeout=self.TIMEOUT,
+            )
         else:
-            r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'),
-                             timeout=self.TIMEOUT)
+            r = session.post(
+                flow_url,
+                data=flow_data,
+                headers=self.putheaders,
+                stream=False,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         return r.status_code
 
@@ -303,7 +330,7 @@ class FlowConfigBlaster(object):
         :return: string containing plain json
         """
         fmod = dict(self.flow_mode_template)
-        fmod['flow'] = flow_list
+        fmod["flow"] = flow_list
         flow_data = json.dumps(fmod)
         return flow_data
 
@@ -328,7 +355,10 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print('    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes))
+            print(
+                "    Thread %d:\n        Adding %d flows on %d nodes"
+                % (tid, self.nflows, n_nodes)
+            )
 
         nflows = 0
         nb_actions = []
@@ -336,9 +366,18 @@ class FlowConfigBlaster(object):
             node_id = randrange(1, n_nodes + 1)
             flow_list = []
             for i in range(self.fpr):
-                flow_id = tid * (self.ncycles * self.nflows) + nflows + start_flow_id + self.startflow
+                flow_id = (
+                    tid * (self.ncycles * self.nflows)
+                    + nflows
+                    + start_flow_id
+                    + self.startflow
+                )
                 self.flows[tid][flow_id] = node_id
-                flow_list.append(self.create_flow_from_template(flow_id, self.ip_addr.increment(), node_id))
+                flow_list.append(
+                    self.create_flow_from_template(
+                        flow_id, self.ip_addr.increment(), node_id
+                    )
+                )
                 nflows += 1
                 if nflows >= self.nflows:
                     break
@@ -354,15 +393,17 @@ class FlowConfigBlaster(object):
                     rqst_stats[sts] = 1
                     flow_stats[sts] = len(nb_action[2])
 
-        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, flow_stats, t.secs)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+            rqst_stats, flow_stats, t.secs
+        )
 
         with self.print_lock:
-            print('\n    Thread %d results (ADD): ' % tid)
-            print('        Elapsed time: %.2fs,' % t.secs)
-            print('        Requests/s: %.2f OK, %.2f Total' % (ok_rps, total_rps))
-            print('        Flows/s:    %.2f OK, %.2f Total' % (ok_fps, total_fps))
-            print('        Stats ({Requests}, {Flows}): ')
-            print(rqst_stats,)
+            print("\n    Thread %d results (ADD): " % tid)
+            print("        Elapsed time: %.2fs," % t.secs)
+            print("        Requests/s: %.2f OK, %.2f Total" % (ok_rps, total_rps))
+            print("        Flows/s:    %.2f OK, %.2f Total" % (ok_fps, total_fps))
+            print("        Stats ({Requests}, {Flows}): ")
+            print(rqst_stats)
             print(flow_stats)
             self.threads_done += 1
 
@@ -391,7 +432,12 @@ class FlowConfigBlaster(object):
         if not self.auth:
             r = session.delete(flow_url, headers=self.getheaders, timeout=self.TIMEOUT)
         else:
-            r = session.delete(flow_url, headers=self.getheaders, auth=('admin', 'admin'), timeout=self.TIMEOUT)
+            r = session.delete(
+                flow_url,
+                headers=self.getheaders,
+                auth=("admin", "admin"),
+                timeout=self.TIMEOUT,
+            )
 
         return r.status_code
 
@@ -412,25 +458,34 @@ class FlowConfigBlaster(object):
         n_nodes = self.get_num_nodes(s)
 
         with self.print_lock:
-            print('Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes))
+            print(
+                "Thread %d: Deleting %d flows on %d nodes" % (tid, self.nflows, n_nodes)
+            )
 
         with Timer() as t:
             for flow in range(self.nflows):
-                flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
+                flow_id = (
+                    tid * (self.ncycles * self.nflows)
+                    + flow
+                    + start_flow
+                    + self.startflow
+                )
                 sts = self.delete_flow(s, self.flows[tid][flow_id], flow_id, flow)
                 try:
                     rqst_stats[sts] += 1
                 except KeyError:
                     rqst_stats[sts] = 1
 
-        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(rqst_stats, rqst_stats, t.secs)
+        ok_rps, total_rps, ok_fps, total_fps = self.stats.process_stats(
+            rqst_stats, rqst_stats, t.secs
+        )
 
         with self.print_lock:
-            print('\n    Thread %d results (DELETE): ' % tid)
-            print('        Elapsed time: %.2fs,' % t.secs)
-            print('        Requests/s:  %.2f OK,  %.2f Total' % (ok_rps, total_rps))
-            print('        Flows/s:     %.2f OK,  %.2f Total' % (ok_fps, total_fps))
-            print('        Stats ({Requests})',)
+            print("\n    Thread %d results (DELETE): " % tid)
+            print("        Elapsed time: %.2fs," % t.secs)
+            print("        Requests/s:  %.2f OK,  %.2f Total" % (ok_rps, total_rps))
+            print("        Flows/s:     %.2f OK,  %.2f Total" % (ok_fps, total_fps))
+            print("        Stats ({Requests})")
             print(rqst_stats)
             self.threads_done += 1
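
A worked example of the flow-id arithmetic shared by the add and delete
threads above: each thread owns a contiguous block of ncycles * nflows ids, so
with 2 cycles and 10 flows per thread, thread 1 covers ids 20..29 in its first
cycle (start_flow and --startflow both 0):

tid, ncycles, nflows, start_flow, startflow = 1, 2, 10, 0, 0
ids = [tid * (ncycles * nflows) + f + start_flow + startflow for f in range(nflows)]
print(ids[0], ids[-1])  # -> 20 29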
 
@@ -454,7 +509,7 @@ class FlowConfigBlaster(object):
         for c in range(self.ncycles):
             self.stats = self.FcbStats()
             with self.print_lock:
-                print('\nCycle %d:' % c)
+                print("\nCycle %d:" % c)
 
             threads = []
             for i in range(self.nthreads):
@@ -468,20 +523,34 @@ class FlowConfigBlaster(object):
                     thread.join()
 
             with self.print_lock:
-                print('\n*** Test summary:')
-                print('    Elapsed time:    %.2fs' % t.secs)
-                print('    Peak requests/s: %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate()))
-                print('    Peak flows/s:    %.2f OK, %.2f Total' % (
-                    self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate()))
-                print('    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)' % (
-                    self.stats.get_ok_rqsts() / t.secs,
-                    self.stats.get_total_rqsts() / t.secs,
-                    (self.stats.get_total_rqsts() / t.secs * 100) / self.stats.get_total_rqst_rate()))
-                print('    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)' % (
-                      self.stats.get_ok_flows() / t.secs,
-                      self.stats.get_total_flows() / t.secs,
-                      (self.stats.get_total_flows() / t.secs * 100) / self.stats.get_total_flow_rate()))
+                print("\n*** Test summary:")
+                print("    Elapsed time:    %.2fs" % t.secs)
+                print(
+                    "    Peak requests/s: %.2f OK, %.2f Total"
+                    % (self.stats.get_ok_rqst_rate(), self.stats.get_total_rqst_rate())
+                )
+                print(
+                    "    Peak flows/s:    %.2f OK, %.2f Total"
+                    % (self.stats.get_ok_flow_rate(), self.stats.get_total_flow_rate())
+                )
+                print(
+                    "    Avg. requests/s: %.2f OK, %.2f Total (%.2f%% of peak total)"
+                    % (
+                        self.stats.get_ok_rqsts() / t.secs,
+                        self.stats.get_total_rqsts() / t.secs,
+                        (self.stats.get_total_rqsts() / t.secs * 100)
+                        / self.stats.get_total_rqst_rate(),
+                    )
+                )
+                print(
+                    "    Avg. flows/s:    %.2f OK, %.2f Total (%.2f%% of peak total)"
+                    % (
+                        self.stats.get_ok_flows() / t.secs,
+                        self.stats.get_total_flows() / t.secs,
+                        (self.stats.get_total_flows() / t.secs * 100)
+                        / self.stats.get_total_flow_rate(),
+                    )
+                )
 
                 self.total_ok_flows += self.stats.get_ok_flows()
                 self.total_ok_rqsts += self.stats.get_ok_rqsts()
@@ -500,7 +569,7 @@ class FlowConfigBlaster(object):
         return self.total_ok_rqsts
 
     def create_flow_name(self, flow_id):
-        return 'TestFlow-%d' % flow_id
+        return "TestFlow-%d" % flow_id
 
 
 def get_json_from_file(filename):
@@ -509,16 +578,21 @@ def get_json_from_file(filename):
     :param filename: File from which to get the template
     :return: The json flow template (string)
     """
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         try:
             ft = json.load(f)
-            keys = ft['flow'][0].keys()
-            if (u'cookie' in keys) and (u'flow-name' in keys) and (u'id' in keys) and (u'match' in keys):
-                if u'ipv4-destination' in ft[u'flow'][0]['match'].keys():
+            keys = ft["flow"][0].keys()
+            if (
+                (u"cookie" in keys)
+                and (u"flow-name" in keys)
+                and (u"id" in keys)
+                and (u"match" in keys)
+            ):
+                if u"ipv4-destination" in ft[u"flow"][0]["match"].keys():
                     print('File "%s" ok to use as flow template' % filename)
                     return ft
         except ValueError:
-            print('JSON parsing of file %s failed' % filename)
+            print("JSON parsing of file %s failed" % filename)
             pass
 
     return None
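
A minimal template that satisfies the checks above (every required key present
plus an ipv4-destination match):

minimal_template = {
    "flow": [
        {
            "cookie": 0,
            "flow-name": "check-me",
            "id": "1",
            "match": {"ipv4-destination": "10.0.0.1/32"},
        }
    ]
}
keys = minimal_template["flow"][0].keys()
print({"cookie", "flow-name", "id", "match"} <= set(keys))  # -> True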
@@ -531,7 +605,7 @@ def get_json_from_file(filename):
 # also beneficial to have unique "cookie" and "flow-name" attributes for easier
 # identification of the flow.
 ###############################################################################
-example_flow_mod_json = '''{
+example_flow_mod_json = """{
     "flow": [
         {
             "id": "38",
@@ -570,7 +644,7 @@ example_flow_mod_json = '''{
         }
 
     ]
-}'''
+}"""
 
 
 def create_arguments_parser():
@@ -578,47 +652,96 @@ def create_arguments_parser():
     Shorthand for building the argument parser at library level so that derived tools can access and extend it.
     :return: argument parser supporting config blaster arguments and parameters
     """
-    my_parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then'
-                                                    ' deletes flows into the config tree, as specified by'
-                                                    ' optional parameters.')
-
-    my_parser.add_argument('--host', default='127.0.0.1',
-                           help='Host where odl controller is running (default is 127.0.0.1).  '
-                                'Specify a comma-separated list of hosts to perform round-robin load-balancing.')
-    my_parser.add_argument('--port', default='8181',
-                           help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    my_parser.add_argument('--cycles', type=int, default=1,
-                           help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
-                                'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
-                                'ends when all threads finish. Another cycle is started when the previous cycle '
-                                'finished.')
-    my_parser.add_argument('--threads', type=int, default=1,
-                           help='Number of request worker threads to start in each cycle; default=1. '
-                                'Each thread will add/delete <FLOWS> flows.')
-    my_parser.add_argument('--flows', type=int, default=10,
-                           help='Number of flows that will be added/deleted by each worker thread in each cycle; '
-                                'default 10')
-    my_parser.add_argument('--fpr', type=int, default=1,
-                           help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
-                                'default 1')
-    my_parser.add_argument('--nodes', type=int, default=16,
-                           help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
-                                'flows will be evenly distributed (programmed) into connected nodes.')
-    my_parser.add_argument('--delay', type=int, default=0,
-                           help='Time (in seconds) to wait between the add and delete cycles; default=0')
-    my_parser.add_argument('--delete', dest='delete', action='store_true', default=True,
-                           help='Delete all added flows one by one, benchmark delete '
-                                'performance.')
-    my_parser.add_argument('--no-delete', dest='delete', action='store_false',
-                           help='Do not perform the delete cycle.')
-    my_parser.add_argument('--auth', dest='auth', action='store_true', default=False,
-                           help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
-                                'default: no authentication')
-    my_parser.add_argument('--startflow', type=int, default=0,
-                           help='The starting Flow ID; default=0')
-    my_parser.add_argument('--file', default='',
-                           help='File from which to read the JSON flow template; default: no file, use a built in '
-                                'template.')
+    my_parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then"
+        " deletes flows into the config tree, as specified by"
+        " optional parameters."
+    )
+
+    my_parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1).  "
+        "Specify a comma-separated list of hosts to perform round-robin load-balancing.",
+    )
+    my_parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    my_parser.add_argument(
+        "--cycles",
+        type=int,
+        default=1,
+        help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+        "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+        "ends when all threads finish. Another cycle is started when the previous cycle "
+        "finished.",
+    )
+    my_parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    my_parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+        "default 10",
+    )
+    my_parser.add_argument(
+        "--fpr",
+        type=int,
+        default=1,
+        help="Flows-per-Request - number of flows (batch size) sent in each HTTP request; "
+        "default 1",
+    )
+    my_parser.add_argument(
+        "--nodes",
+        type=int,
+        default=16,
+        help="Number of nodes if mininet is not connected; default=16. If mininet is connected, "
+        "flows will be evenly distributed (programmed) into connected nodes.",
+    )
+    my_parser.add_argument(
+        "--delay",
+        type=int,
+        default=0,
+        help="Time (in seconds) to wait between the add and delete cycles; default=0",
+    )
+    my_parser.add_argument(
+        "--delete",
+        dest="delete",
+        action="store_true",
+        default=True,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    my_parser.add_argument(
+        "--no-delete",
+        dest="delete",
+        action="store_false",
+        help="Do not perform the delete cycle.",
+    )
+    my_parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        default=False,
+        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
+        "default: no authentication",
+    )
+    my_parser.add_argument(
+        "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+    )
+    my_parser.add_argument(
+        "--file",
+        default="",
+        help="File from which to read the JSON flow template; default: no file, use a built in "
+        "template.",
+    )
     return my_parser
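The --delete/--no-delete pair above shares dest="delete", so whichever flag appears last on the command line wins and the default stays True; a minimal standalone sketch of that argparse pattern:

import argparse

p = argparse.ArgumentParser()
p.add_argument("--delete", dest="delete", action="store_true", default=True)
p.add_argument("--no-delete", dest="delete", action="store_false")

assert p.parse_args([]).delete is True                # default: delete cycle runs
assert p.parse_args(["--no-delete"]).delete is False  # explicit opt-out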
 
 
@@ -633,28 +756,39 @@ if __name__ == "__main__":
     parser = create_arguments_parser()
     in_args = parser.parse_args()
 
-    if in_args.file != '':
+    if in_args.file != "":
         flow_template = get_json_from_file(in_args.file)
     else:
         flow_template = None
 
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr, in_args.nodes,
-                            in_args.flows, in_args.startflow, in_args.auth)
+    fct = FlowConfigBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.fpr,
+        in_args.nodes,
+        in_args.flows,
+        in_args.startflow,
+        in_args.auth,
+    )
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> are added from each thread
     fct.add_blaster()
 
-    print('\n*** Total flows added: %s' % fct.get_ok_flows())
-    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
+    print("\n*** Total flows added: %s" % fct.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fct.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
+        print(
+            "*** Waiting for %d seconds before the delete cycle ***\n" % in_args.delay
+        )
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
-        print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
-        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
+        print("\n*** Total flows deleted: %s" % fct.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fct.get_ok_rqsts())
index f24d5ab06239df2ae0839208a49ca905e48609c0..56f0cdc891ebaccb87aed626f34b42d9b47677c2 100755 (executable)
@@ -17,24 +17,24 @@ class FlowConfigBulkBlaster(flow_config_blaster.FlowConfigBlaster):
 
     def __init__(self, *args, **kwargs):
         super(FlowConfigBulkBlaster, self).__init__(*args, **kwargs)
-        self.bulk_type = 'RPC'
+        self.bulk_type = "RPC"
 
     def update_post_url_template(self, action):
         """
         Update url templates (defined in parent class) in order to point to bulk API rpcs.
         :param action: user intention (currently only 'ADD' is supported)
         """
-        if self.bulk_type == 'RPC':
-            self.post_url_template = 'http://%s:' + self.port + '/'
-            if action == 'ADD':
+        if self.bulk_type == "RPC":
+            self.post_url_template = "http://%s:" + self.port + "/"
+            if action == "ADD":
                 self.post_url_template += self.FLW_ADD_RPC_URL
-            elif action == 'REMOVE':
+            elif action == "REMOVE":
                 self.post_url_template += self.FLW_REMOVE_RPC_URL
-        elif self.bulk_type == 'DS':
-            self.post_url_template = 'http://%s:' + self.port + '/'
-            if action == 'ADD':
+        elif self.bulk_type == "DS":
+            self.post_url_template = "http://%s:" + self.port + "/"
+            if action == "ADD":
                 self.post_url_template += self.FLW_ADD_DS_URL
-            elif action == 'REMOVE':
+            elif action == "REMOVE":
                 self.post_url_template += self.FLW_REMOVE_DS_URL
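A sketch of how this template expands once a host is substituted; FLW_ADD_RPC_URL is defined in the parent class and not shown in this hunk, so the path below is a hypothetical stand-in:

port = "8181"
FLW_ADD_RPC_URL = "restconf/operations/sal-bulk-flow:add-flows-rpc"  # hypothetical stand-in
post_url_template = "http://%s:" + port + "/" + FLW_ADD_RPC_URL
print(post_url_template % "127.0.0.1")
# http://127.0.0.1:8181/restconf/operations/sal-bulk-flow:add-flows-rpc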
 
     def assemble_post_url(self, host, node):
@@ -55,13 +55,17 @@ class FlowConfigBulkBlaster(flow_config_blaster.FlowConfigBlaster):
         :return: flow structure ready to use
         """
         # python 2.7 specific syntax (super)
-        flow = super(FlowConfigBulkBlaster, self).create_flow_from_template(flow_id, ipaddr, node_id)
-        flow_id = flow['id']
-        del(flow['id'])
-        if self.bulk_type == 'DS':
-            flow['flow-id'] = flow_id
-        flow['node'] = '/opendaylight-inventory:nodes/opendaylight-inventory' \
-                       ':node[opendaylight-inventory:id="openflow:{}"]'.format(node_id)
+        flow = super(FlowConfigBulkBlaster, self).create_flow_from_template(
+            flow_id, ipaddr, node_id
+        )
+        flow_id = flow["id"]
+        del flow["id"]
+        if self.bulk_type == "DS":
+            flow["flow-id"] = flow_id
+        flow["node"] = (
+            "/opendaylight-inventory:nodes/opendaylight-inventory"
+            ':node[opendaylight-inventory:id="openflow:{}"]'.format(node_id)
+        )
         return flow
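A small worked example of the reshaping above (input values invented): 'id' is moved to 'flow-id' for the DS bulk type and a node instance identifier is attached:

flow = {"id": "42", "priority": 2}
flow_id = flow["id"]
del flow["id"]
flow["flow-id"] = flow_id  # DS bulk type only
flow["node"] = (
    "/opendaylight-inventory:nodes/opendaylight-inventory"
    ':node[opendaylight-inventory:id="openflow:{}"]'.format(1)
)
# flow == {'priority': 2, 'flow-id': '42',
#          'node': '/opendaylight-inventory:nodes/opendaylight-inventory:node[...]'}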
 
     def convert_to_json(self, flow_list, node_id=None):
@@ -72,10 +76,10 @@ class FlowConfigBulkBlaster(flow_config_blaster.FlowConfigBlaster):
         :return: json string
         """
         json_input = None
-        if self.bulk_type == 'RPC':
-            json_input = {'input': {'bulk-flow-item': flow_list}}
-        elif self.bulk_type == 'DS':
-            json_input = {'input': {'bulk-flow-ds-item': flow_list}}
+        if self.bulk_type == "RPC":
+            json_input = {"input": {"bulk-flow-item": flow_list}}
+        elif self.bulk_type == "DS":
+            json_input = {"input": {"bulk-flow-ds-item": flow_list}}
 
         flow_data = json.dumps(json_input)
         return flow_data
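The envelope produced above for the RPC bulk type, shown for a two-item list with the flow bodies elided (for the DS type the key would be "bulk-flow-ds-item"):

import json

flow_list = [{"flow-id": "1"}, {"flow-id": "2"}]
print(json.dumps({"input": {"bulk-flow-item": flow_list}}))
# {"input": {"bulk-flow-item": [{"flow-id": "1"}, {"flow-id": "2"}]}}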
@@ -89,37 +93,51 @@ if __name__ == "__main__":
     # deleting flows from the controller's config data store
     ############################################################################
     parser = flow_config_blaster.create_arguments_parser()
-    parser.add_argument('--bulk-type', default='RPC', dest='bulk_type',
-                        choices=['RPC', 'DS'],
-                        help='Bulk type to use: RPC, DS (default is RPC)')
+    parser.add_argument(
+        "--bulk-type",
+        default="RPC",
+        dest="bulk_type",
+        choices=["RPC", "DS"],
+        help="Bulk type to use: RPC, DS (default is RPC)",
+    )
 
     in_args = parser.parse_args()
 
-    if in_args.file != '':
+    if in_args.file != "":
         flow_template = flow_config_blaster.get_json_from_file(in_args.file)
     else:
         flow_template = None
 
-    fcbb = FlowConfigBulkBlaster(in_args.host, in_args.port, in_args.cycles,
-                                 in_args.threads, in_args.fpr, in_args.nodes,
-                                 in_args.flows, in_args.startflow, in_args.auth)
+    fcbb = FlowConfigBulkBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.fpr,
+        in_args.nodes,
+        in_args.flows,
+        in_args.startflow,
+        in_args.auth,
+    )
     fcbb.bulk_type = in_args.bulk_type
-    fcbb.update_post_url_template('ADD')
+    fcbb.update_post_url_template("ADD")
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> are added from each thread
     fcbb.add_blaster()
 
-    print('\n*** Total flows added: %s' % fcbb.get_ok_flows())
-    print('    HTTP[OK] results:  %d\n' % fcbb.get_ok_rqsts())
+    print("\n*** Total flows added: %s" % fcbb.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fcbb.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
+        print(
+            "*** Waiting for %d seconds before the delete cycle ***\n" % in_args.delay
+        )
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and
     # <flows> previously added in an add cycle are deleted in each thread
     if in_args.delete:
         fcbb.delete_blaster()
-        print('\n*** Total flows deleted: %s' % fcbb.get_ok_flows())
-        print('    HTTP[OK] results:    %d\n' % fcbb.get_ok_rqsts())
+        print("\n*** Total flows deleted: %s" % fcbb.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fcbb.get_ok_rqsts())
index cbed315ecae33a37879b1e22df49c593610f524f..f32348f8e174ac0cae0f4fc3aca6e6d86f6d61af 100755 (executable)
@@ -17,22 +17,25 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
     """
     FlowConfigBlaster, Floodlight Edition; Uses the Floodlight Static Flow Entry Pusher REST API to inject flows.
     """
+
     flow = {
-        'switch': "00:00:00:00:00:00:00:01",
+        "switch": "00:00:00:00:00:00:00:01",
         "name": "flow-mod",
         "cookie": "0",
         "priority": "32768",
         "eth_type": "2048",
         "ipv4_dst": "10.0.0.1/32",
         "active": "true",
-        "actions": "output=flood"
+        "actions": "output=flood",
     }
 
     def __init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow):
-        FlowConfigBlaster.__init__(self, host, port, ncycles, nthreads, 1, nnodes, nflows, startflow, False)
+        FlowConfigBlaster.__init__(
+            self, host, port, ncycles, nthreads, 1, nnodes, nflows, startflow, False
+        )
 
     def create_floodlight_url(self, host):
-        return 'http://' + host + ":" + self.port + '/wm/staticflowpusher/json'
+        return "http://" + host + ":" + self.port + "/wm/staticflowpusher/json"
 
     def get_num_nodes(self, session):
         """
@@ -40,7 +43,13 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
         :param session:
         :return:
         """
-        url = 'http://' + self.host + ":" + self.port + '/wm/core/controller/switches/json'
+        url = (
+            "http://"
+            + self.host
+            + ":"
+            + self.port
+            + "/wm/core/controller/switches/json"
+        )
         nodes = self.nnodes
 
         r = session.get(url, headers=self.getheaders, stream=False)
@@ -63,14 +72,14 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
         :return: status code from the POST operation
         """
         flow = copy.deepcopy(self.flow)
-        flow['switch'] = "00:00:00:00:00:00:00:%s" % '{0:02x}'.format(node)
-        flow['name'] = flow_list[0]['flow-name']
-        flow['table'] = flow_list[0]['table_id']
-        flow['cookie'] = flow_list[0]['cookie']
+        flow["switch"] = "00:00:00:00:00:00:00:%s" % "{0:02x}".format(node)
+        flow["name"] = flow_list[0]["flow-name"]
+        flow["table"] = flow_list[0]["table_id"]
+        flow["cookie"] = flow_list[0]["cookie"]
         # flow['cookie_mask'] = flow_list[0]['cookie_mask']
-        flow['idle_timeout'] = flow_list[0]['idle-timeout']
-        flow['hard_timeout'] = flow_list[0]['hard-timeout']
-        flow['ipv4_dst'] = flow_list[0]['match']['ipv4-destination']
+        flow["idle_timeout"] = flow_list[0]["idle-timeout"]
+        flow["hard_timeout"] = flow_list[0]["hard-timeout"]
+        flow["ipv4_dst"] = flow_list[0]["match"]["ipv4-destination"]
 
         flow_data = json.dumps(flow)
 
@@ -78,7 +87,9 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
         host = hosts[flow_count % len(hosts)]
         flow_url = self.create_floodlight_url(host)
 
-        r = session.post(flow_url, data=flow_data, headers=self.putheaders, stream=False)
+        r = session.post(
+            flow_url, data=flow_data, headers=self.putheaders, stream=False
+        )
         return r.status_code
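A worked example of the switch DPID string built above, checked for a sample node id:

node = 10
print("00:00:00:00:00:00:00:%s" % "{0:02x}".format(node))
# 00:00:00:00:00:00:00:0a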
 
     def delete_flow(self, session, node, flow_id, flow_count):
@@ -94,13 +105,19 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
         hosts = self.host.split(",")
         host = hosts[flow_count % len(hosts)]
         flow_url = self.create_floodlight_url(host)
-        flow_data = json.dumps({'name': self.create_flow_name(flow_id)})
+        flow_data = json.dumps({"name": self.create_flow_name(flow_id)})
 
         r = session.delete(flow_url, data=flow_data, headers=self.getheaders)
         return r.status_code
 
     def clear_all_flows(self):
-        clear_url = 'http://' + self.host + ":" + self.port + '/wm/staticflowpusher/clear/all/json'
+        clear_url = (
+            "http://"
+            + self.host
+            + ":"
+            + self.port
+            + "/wm/staticflowpusher/clear/all/json"
+        )
         r = requests.get(clear_url)
         if r.status_code == 200:
             print("All flows cleared before the test")
@@ -110,53 +127,95 @@ class FlowConfigBlasterFLE(FlowConfigBlaster):
 
 if __name__ == "__main__":
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test for Floodlight: First adds and '
-                                                 'then deletes flows using the Static Flow Entry Pusher REST API.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where the controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8080',
-                        help='Port on which the controller\'s RESTCONF is listening (default is 8080)')
-    parser.add_argument('--cycles', type=int, default=1,
-                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
-                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
-                             'ends when all threads finish. Another cycle is started when the previous cycle finished.')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
-                             'default 10')
-    parser.add_argument('--nodes', type=int, default=16,
-                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
-                             'flows will be evenly distributed (programmed) into connected nodes.')
-    parser.add_argument('--delay', type=int, default=0,
-                        help='Time (in seconds) to wait between the add and delete cycles; default=0')
-    parser.add_argument('--no-delete', dest='delete', action='store_false',
-                        help='Do not perform the delete cycle.')
-    parser.add_argument('--startflow', type=int, default=0,
-                        help='The starting Flow ID; default=0')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test for Floodlight: First adds and "
+        "then deletes flows using the Static Flow Entry Pusher REST API."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where the controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8080",
+        help="Port on which the controller's RESTCONF is listening (default is 8080)",
+    )
+    parser.add_argument(
+        "--cycles",
+        type=int,
+        default=1,
+        help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+        "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+        "ends when all threads finish. Another cycle is started when the previous cycle finished.",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+        "default 10",
+    )
+    parser.add_argument(
+        "--nodes",
+        type=int,
+        default=16,
+        help="Number of nodes if mininet is not connected; default=16. If mininet is connected, "
+        "flows will be evenly distributed (programmed) into connected nodes.",
+    )
+    parser.add_argument(
+        "--delay",
+        type=int,
+        default=0,
+        help="Time (in seconds) to wait between the add and delete cycles; default=0",
+    )
+    parser.add_argument(
+        "--no-delete",
+        dest="delete",
+        action="store_false",
+        help="Do not perform the delete cycle.",
+    )
+    parser.add_argument(
+        "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+    )
 
     in_args = parser.parse_args()
 
-    fct = FlowConfigBlasterFLE(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
-                               in_args.flows, in_args.startflow)
+    fct = FlowConfigBlasterFLE(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.nodes,
+        in_args.flows,
+        in_args.startflow,
+    )
 
     fct.clear_all_flows()
 
     # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
     fct.add_blaster()
 
-    print('\n*** Total flows added: %s' % fct.get_ok_flows())
-    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
+    print("\n*** Total flows added: %s" % fct.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fct.get_ok_rqsts())
 
     if in_args.delay > 0:
-        print('*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay)
+        print(
+            "*** Waiting for %d seconds before the delete cycle ***\n" % in_args.delay
+        )
         time.sleep(in_args.delay)
 
     # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
     # deleted in each thread
     if in_args.delete:
         fct.delete_blaster()
-        print('\n*** Total flows deleted: %s' % fct.get_ok_flows())
-        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
+        print("\n*** Total flows deleted: %s" % fct.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fct.get_ok_rqsts())
index 9dacaec456aca698fe1f516717bfd393ca8a6159..a43cff48a118af760c930ba1c45b6d187b4fc700 100755 (executable)
@@ -51,71 +51,145 @@ if __name__ == "__main__":
     #    for a specified period of time.
     ############################################################################
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--cycles', type=int, default=1,
-                        help='Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are '
-                             'performed in cycles. <THREADS> worker threads are started in each cycle and the cycle '
-                             'ends when all threads finish. Another cycle is started when the previous cycle finished.')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flows that will be added/deleted by each worker thread in each cycle; '
-                             'default 10')
-    parser.add_argument('--fpr', type=int, default=1,
-                        help='Flows-per-Request - number of flows (batch size) sent in each HTTP request; '
-                             'default 1')
-    parser.add_argument('--delay', type=int, default=2,
-                        help='Time (seconds) to between inventory polls when waiting for stats to catch up; default=1')
-    parser.add_argument('--timeout', type=int, default=100,
-                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
-    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
-                        help='Delete all added flows one by one, benchmark delete '
-                             'performance.')
-    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
-                        help='Delete all flows in bulk; default=False')
-    parser.add_argument('--auth', dest='auth', action='store_true',
-                        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False")
-    parser.add_argument('--startflow', type=int, default=0,
-                        help='The starting Flow ID; default=0')
-    parser.add_argument('--file', default='',
-                        help='File from which to read the JSON flow template; default: no file, use a built in '
-                             'template.')
-    parser.add_argument('--config_monitor', type=int, default=60,
-                        help='Time to monotir inventory after flows are configured in seconds; default=60')
-    parser.add_argument('--deconfig_monitor', type=int, default=60,
-                        help='Time to monitor inventory after flows are de configured in seconds; default=60')
-    parser.add_argument('--monitor_period', type=int, default=10,
-                        help='Monitor period of triggering inventory crawler in seconds; default=10')
-    parser.add_argument('--monitor_outfile', default=None, help='Output file(if specified)')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--cycles",
+        type=int,
+        default=1,
+        help="Number of flow add/delete cycles; default 1. Both Flow Adds and Flow Deletes are "
+        "performed in cycles. <THREADS> worker threads are started in each cycle and the cycle "
+        "ends when all threads finish. Another cycle is started when the previous cycle finished.",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted by each worker thread in each cycle; "
+        "default 10",
+    )
+    parser.add_argument(
+        "--fpr",
+        type=int,
+        default=1,
+        help="Flows-per-Request - number of flows (batch size) sent in each HTTP request; "
+        "default 1",
+    )
+    parser.add_argument(
+        "--delay",
+        type=int,
+        default=2,
+        help="Time (seconds) to between inventory polls when waiting for stats to catch up; default=1",
+    )
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=100,
+        help="The maximum time (seconds) to wait between the add and delete cycles; default=100",
+    )
+    parser.add_argument(
+        "--delete",
+        dest="delete",
+        action="store_true",
+        default=True,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    parser.add_argument(
+        "--bulk-delete",
+        dest="bulk_delete",
+        action="store_true",
+        default=False,
+        help="Delete all flows in bulk; default=False",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False",
+    )
+    parser.add_argument(
+        "--startflow", type=int, default=0, help="The starting Flow ID; default=0"
+    )
+    parser.add_argument(
+        "--file",
+        default="",
+        help="File from which to read the JSON flow template; default: no file, use a built in "
+        "template.",
+    )
+    parser.add_argument(
+        "--config_monitor",
+        type=int,
+        default=60,
+        help="Time to monotir inventory after flows are configured in seconds; default=60",
+    )
+    parser.add_argument(
+        "--deconfig_monitor",
+        type=int,
+        default=60,
+        help="Time to monitor inventory after flows are de configured in seconds; default=60",
+    )
+    parser.add_argument(
+        "--monitor_period",
+        type=int,
+        default=10,
+        help="Monitor period of triggering inventory crawler in seconds; default=10",
+    )
+    parser.add_argument(
+        "--monitor_outfile", default=None, help="Output file(if specified)"
+    )
 
     in_args = parser.parse_args()
 
     # Initialize
-    if in_args.file != '':
+    if in_args.file != "":
         flow_template = get_json_from_file(in_args.file)
     else:
         flow_template = None
 
-    ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
-
-    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.fpr,
-                            16, in_args.flows, in_args.startflow, in_args.auth)
+    ic = InventoryCrawler(
+        in_args.host, in_args.port, 0, "operational", in_args.auth, False
+    )
+
+    fct = FlowConfigBlaster(
+        in_args.host,
+        in_args.port,
+        in_args.cycles,
+        in_args.threads,
+        in_args.fpr,
+        16,
+        in_args.flows,
+        in_args.startflow,
+        in_args.auth,
+    )
     # Get the baseline stats. Required in Step 3 to validate if the delete
     # function gets the controller back to the baseline
     ic.crawl_inventory()
     reported = ic.reported_flows
     found = ic.found_flows
 
-    print('Baseline:')
-    print('   Reported nodes: %d' % reported)
-    print('   Found nodes:    %d' % found)
+    print("Baseline:")
+    print("   Reported nodes: %d" % reported)
+    print("   Found nodes:    %d" % found)
 
     stats = []
     stats.append((time.time(), ic.nodes, ic.reported_flows, ic.found_flows))
@@ -123,8 +197,8 @@ if __name__ == "__main__":
     # each cycle and <FLOWS> flows are added from each thread
     fct.add_blaster()
 
-    print('\n*** Total flows added: %d' % fct.get_ok_flows())
-    print('    HTTP[OK] results:  %d\n' % fct.get_ok_rqsts())
+    print("\n*** Total flows added: %d" % fct.get_ok_flows())
+    print("    HTTP[OK] results:  %d\n" % fct.get_ok_rqsts())
 
     # monitor stats and save results in the list
     for stat_item in monitor_stats(ic, in_args.config_monitor, in_args.monitor_period):
@@ -135,25 +209,27 @@ if __name__ == "__main__":
     # in each cycle and <FLOWS> flows previously added in an add cycle are
     # deleted in each thread
     if in_args.bulk_delete:
-        print('\nDeleting all flows in bulk:')
+        print("\nDeleting all flows in bulk:")
         sts = cleanup_config_odl(in_args.host, in_args.port, in_args.auth)
         if sts != 200:
-            print('   Failed to delete flows, code %d' % sts)
+            print("   Failed to delete flows, code %d" % sts)
         else:
-            print('   All flows deleted.')
+            print("   All flows deleted.")
     else:
-        print('\nDeleting flows one by one\n   ',)
+        print("\nDeleting flows one by one\n   ")
         fct.delete_blaster()
-        print('\n*** Total flows deleted: %d' % fct.get_ok_flows())
-        print('    HTTP[OK] results:    %d\n' % fct.get_ok_rqsts())
+        print("\n*** Total flows deleted: %d" % fct.get_ok_flows())
+        print("    HTTP[OK] results:    %d\n" % fct.get_ok_rqsts())
 
     # monitor stats and append to the list
-    for stat_item in monitor_stats(ic, in_args.deconfig_monitor, in_args.monitor_period):
+    for stat_item in monitor_stats(
+        ic, in_args.deconfig_monitor, in_args.monitor_period
+    ):
         print(stat_item)
         stats.append(stat_item)
 
     # if requested, write collected data into the file
     if in_args.monitor_outfile is not None:
-        with open(in_args.monitor_outfile, 'wt') as fd:
+        with open(in_args.monitor_outfile, "wt") as fd:
             for e in stats:
-                fd.write('{0} {1} {2} {3}\n'.format(e[0], e[1], e[2], e[3]))
+                fd.write("{0} {1} {2} {3}\n".format(e[0], e[1], e[2], e[3]))
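Each appended sample is a (time, nodes, reported-flows, found-flows) tuple, so the optional monitor file holds one space-separated sample per line, e.g. (values invented):

1585340000.12 16 160 160
1585340010.13 16 0 0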
index d4e71ada2a6a32e9073999c31d46226f7ab51305..596125e3f611b4d5c4b39eea25a546ae98c89c3c 100755 (executable)
@@ -16,14 +16,14 @@ class InventoryCrawler(object):
     found_flows = 0
     nodes = 0
 
-    INVENTORY_URL = 'restconf/%s/opendaylight-inventory:nodes'
-    hdr = {'Accept': 'application/json'}
+    INVENTORY_URL = "restconf/%s/opendaylight-inventory:nodes"
+    hdr = {"Accept": "application/json"}
     OK, ERROR = range(2)
     table_stats_unavailable = 0
     table_stats_fails = []
 
     def __init__(self, host, port, plevel, datastore, auth, debug):
-        self.url = 'http://' + host + ":" + port + '/' + self.INVENTORY_URL % datastore
+        self.url = "http://" + host + ":" + port + "/" + self.INVENTORY_URL % datastore
         self.plevel = plevel
         self.auth = auth
         self.debug = debug
@@ -34,18 +34,18 @@ class InventoryCrawler(object):
         """
         self.found_flows += len(flows)
         if self.plevel > 1:
-            print('             Flows found: %d\n' % len(flows))
+            print("             Flows found: %d\n" % len(flows))
             if self.plevel > 2:
                 for f in flows:
-                    s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
+                    s = json.dumps(f, sort_keys=True, indent=4, separators=(",", ": "))
                     # s = s.replace('{\n', '')
                     # s = s.replace('}', '')
                     s = s.strip()
-                    s = s.lstrip('{')
-                    s = s.rstrip('}')
-                    s = s.replace('\n', '\n            ')
-                    s = s.lstrip('\n')
-                    print("             Flow %s:" % (f['id']))
+                    s = s.lstrip("{")
+                    s = s.rstrip("}")
+                    s = s.replace("\n", "\n            ")
+                    s = s.lstrip("\n")
+                    print("             Flow %s:" % (f["id"]))
                     print(s)
 
     def crawl_table(self, table):
@@ -54,25 +54,27 @@ class InventoryCrawler(object):
         (plevel), it also invokes the crawl_flows
         """
         try:
-            stats = table['opendaylight-flow-table-statistics:flow-table-statistics']
-            active_flows = int(stats['active-flows'])
+            stats = table["opendaylight-flow-table-statistics:flow-table-statistics"]
+            active_flows = int(stats["active-flows"])
 
             if active_flows > 0:
                 self.reported_flows += active_flows
                 if self.plevel > 1:
-                    print('        Table %s:' % table['id'])
-                    s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
-                    s = s.replace('{\n', '')
-                    s = s.replace('}', '')
+                    print("        Table %s:" % table["id"])
+                    s = json.dumps(
+                        stats, sort_keys=True, indent=12, separators=(",", ": ")
+                    )
+                    s = s.replace("{\n", "")
+                    s = s.replace("}", "")
                     print(s)
         except KeyError:
             if self.plevel > 1:
-                print("        Stats for Table '%s' not available." % (table['id']))
+                print("        Stats for Table '%s' not available." % (table["id"]))
             self.table_stats_unavailable += 1
             pass
 
         try:
-            flows_in_table = table['flow']
+            flows_in_table = table["flow"]
             self.crawl_flows(flows_in_table)
         except KeyError:
             pass
@@ -85,24 +87,24 @@ class InventoryCrawler(object):
         self.nodes += 1
 
         if self.plevel > 1:
-            print("\nNode '%s':" % ((node['id'])))
+            print("\nNode '%s':" % ((node["id"])))
         elif self.plevel > 0:
-            print("%s" % ((node['id'])))
+            print("%s" % ((node["id"])))
 
         try:
-            tables = node['flow-node-inventory:table']
+            tables = node["flow-node-inventory:table"]
             if self.plevel > 1:
-                print('    Tables: %d' % len(tables))
+                print("    Tables: %d" % len(tables))
 
             for t in tables:
                 self.crawl_table(t)
 
             if self.table_stats_unavailable > 0:
-                self.table_stats_fails.append(node['id'])
+                self.table_stats_fails.append(node["id"])
 
         except KeyError:
             if self.plevel > 1:
-                print('    Data for tables not available.')
+                print("    Data for tables not available.")
 
     def crawl_inventory(self):
         """
@@ -118,28 +120,28 @@ class InventoryCrawler(object):
         if not self.auth:
             r = s.get(self.url, headers=self.hdr, stream=False)
         else:
-            r = s.get(self.url, headers=self.hdr, stream=False, auth=('admin', 'admin'))
+            r = s.get(self.url, headers=self.hdr, stream=False, auth=("admin", "admin"))
 
         if r.status_code == 200:
             try:
-                inv = json.loads(r.content)['nodes']['node']
+                inv = json.loads(r.content)["nodes"]["node"]
                 sinv = []
                 for n in range(len(inv)):
-                    if re.search('openflow', inv[n]['id']) is not None:
+                    if re.search("openflow", inv[n]["id"]) is not None:
                         sinv.append(inv[n])
 
-                sinv = sorted(sinv, key=lambda k: int(re.findall('\d+', k['id'])[0]))
+                sinv = sorted(sinv, key=lambda k: int(re.findall(r"\d+", k["id"])[0]))
 
                 for n in range(len(sinv)):
                     try:
                         self.crawl_node(sinv[n])
                     except Exception:
-                        print('Can not crawl %s' % sinv[n]['id'])
+                        print("Can not crawl %s" % sinv[n]["id"])
 
             except KeyError:
-                print('Could not retrieve inventory, response not in JSON format')
+                print("Could not retrieve inventory, response not in JSON format")
         else:
-            print('Could not retrieve inventory, HTTP error %d' % r.status_code)
+            print("Could not retrieve inventory, HTTP error %d" % r.status_code)
 
         s.close()
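A quick check of the numeric sort used above; ids are ordered by their first run of digits, so "openflow:10" sorts after "openflow:2":

import re

ids = [{"id": "openflow:10"}, {"id": "openflow:2"}]
print(sorted(ids, key=lambda k: int(re.findall(r"\d+", k["id"])[0])))
# [{'id': 'openflow:2'}, {'id': 'openflow:10'}]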
 
@@ -148,38 +150,72 @@ class InventoryCrawler(object):
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Restconf test program')
-    parser.add_argument('--host', default='127.0.0.1', help='host where '
-                        'the controller is running; default 127.0.0.1')
-    parser.add_argument('--port', default='8181', help='port on '
-                        'which odl\'s RESTCONF is listening; default 8181')
-    parser.add_argument('--plevel', type=int, default=0,
-                        help='Print Level: 0 - Summary (stats only); 1 - Node names; 2 - Node details;'
-                             '3 - Flow details')
-    parser.add_argument('--datastore', choices=['operational', 'config'],
-                        default='operational', help='Which data store to crawl; default operational')
-    parser.add_argument('--no-auth', dest='auth', action='store_false', default=False,
-                        help="Do not use authenticated access to REST (default)")
-    parser.add_argument('--auth', dest='auth', action='store_true',
-                        help="Use authenticated access to REST (username: 'admin', password: 'admin').")
-    parser.add_argument('--debug', dest='debug', action='store_true', default=False,
-                        help="List nodes that have not provided proper statistics data")
+    parser = argparse.ArgumentParser(description="Restconf test program")
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="host where " "the controller is running; default 127.0.0.1",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="port on " "which odl's RESTCONF is listening; default 8181",
+    )
+    parser.add_argument(
+        "--plevel",
+        type=int,
+        default=0,
+        help="Print Level: 0 - Summary (stats only); 1 - Node names; 2 - Node details;"
+        "3 - Flow details",
+    )
+    parser.add_argument(
+        "--datastore",
+        choices=["operational", "config"],
+        default="operational",
+        help="Which data store to crawl; default operational",
+    )
+    parser.add_argument(
+        "--no-auth",
+        dest="auth",
+        action="store_false",
+        default=False,
+        help="Do not use authenticated access to REST (default)",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        help="Use authenticated access to REST (username: 'admin', password: 'admin').",
+    )
+    parser.add_argument(
+        "--debug",
+        dest="debug",
+        action="store_true",
+        default=False,
+        help="List nodes that have not provided proper statistics data",
+    )
 
     in_args = parser.parse_args()
 
-    ic = InventoryCrawler(in_args.host, in_args.port, in_args.plevel, in_args.datastore, in_args.auth,
-                          in_args.debug)
+    ic = InventoryCrawler(
+        in_args.host,
+        in_args.port,
+        in_args.plevel,
+        in_args.datastore,
+        in_args.auth,
+        in_args.debug,
+    )
 
     print("Crawling '%s'" % (ic.url))
     ic.crawl_inventory()
 
-    print('\nTotals:')
-    print('    Nodes:          %d' % ic.nodes)
-    print('    Reported flows: %d' % ic.reported_flows)
-    print('    Found flows:    %d' % ic.found_flows)
+    print("\nTotals:")
+    print("    Nodes:          %d" % ic.nodes)
+    print("    Reported flows: %d" % ic.reported_flows)
+    print("    Found flows:    %d" % ic.found_flows)
 
     if in_args.debug:
         n_missing = len(ic.table_stats_fails)
         if n_missing > 0:
-            print('\nMissing table stats (%d nodes):' % n_missing)
+            print("\nMissing table stats (%d nodes):" % n_missing)
             print("%s\n" % (", ".join([x for x in ic.table_stats_fails])))
index 716c644c6ac35263012dd65836b53aab92cf2798..200b0587c2fdfde3318e3ef5ff21bd06b1e561b6 100644 (file)
@@ -46,11 +46,13 @@ total_req_rate = Counter(0.0)
 total_mbytes = Counter(0.0)
 total_mb_rate = Counter(0.0)
 
-putheaders = {'content-type': 'application/json'}
-getheaders = {'Accept': 'application/json'}
+putheaders = {"content-type": "application/json"}
+getheaders = {"Accept": "application/json"}
 
-INVENTORY_URL = 'http://localhost:8080/restconf/operational/opendaylight-inventory:nodes'
-N1T0_URL = 'http://localhost:8080/restconf/operational/opendaylight-inventory:nodes/node/openflow:1/table/0'
+INVENTORY_URL = (
+    "http://localhost:8080/restconf/operational/opendaylight-inventory:nodes"
+)
+N1T0_URL = "http://localhost:8080/restconf/operational/opendaylight-inventory:nodes/node/openflow:1/table/0"
 
 num_threads = 1
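The totals above use a thread-safe counter from the test libraries; a minimal stand-in, assuming a lock-guarded implementation, that mirrors only the calls made in this file (increment() and .value):

import threading

class Counter(object):
    def __init__(self, start=0):
        self._lock = threading.Lock()
        self.value = start

    def increment(self, amount=1):
        # serialize updates from the worker threads
        with self._lock:
            self.value += amount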
 
@@ -71,7 +73,7 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
     results = {}
 
     with print_lock:
-        print('Thread %d: Getting %s' % (tnum, url))
+        print("Thread %d: Getting %s" % (tnum, url))
 
     s = requests.Session()
     with Timer() as t:
@@ -81,7 +83,7 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
 
             try:
                 results[r.status_code] += 1
-            except(KeyError):
+            except (KeyError):
                 results[r.status_code] = 1
 
     total = sum(results.values())
@@ -95,11 +97,11 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
     total_mb_rate.increment(mrate)
 
     with print_lock:
-        print('\nThread %d: ' % tnum)
-        print('    Elapsed time: %.2f,' % t.secs)
-        print('    Requests: %d, Requests/sec: %.2f' % (total, rate))
-        print('    Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate))
-        print('    Results: ')
+        print("\nThread %d: " % tnum)
+        print("    Elapsed time: %.2f," % t.secs)
+        print("    Requests: %d, Requests/sec: %.2f" % (total, rate))
+        print("    Volume: %.2f MB, Rate: %.2f MByte/s" % (mbytes, mrate))
+        print("    Results: ")
         print(results)
 
     with cond:
@@ -108,28 +110,43 @@ def get_inventory(tnum, url, hdrs, rnum, cond):
 
 if __name__ == "__main__":
 
-    parser = argparse.ArgumentParser(description='Restconf test program')
-    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
-                        'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--odlport', default='8080', help='port on '
-                        'which odl\'s RESTCONF is listening (default is 8080)')
-    parser.add_argument('--requests', type=int, default=10, help='number of '
-                        'requests to send')
-    parser.add_argument('--url', default='restconf/operational/opendaylight-inventory:nodes',
-                        help='Url to send.')
-    parser.add_argument('--nthreads', type=int, default=1,
-                        help='Number of request worker threads, default=1')
+    parser = argparse.ArgumentParser(description="Restconf test program")
+    parser.add_argument(
+        "--odlhost",
+        default="127.0.0.1",
+        help="host where " "odl controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--odlport",
+        default="8080",
+        help="port on " "which odl's RESTCONF is listening (default is 8080)",
+    )
+    parser.add_argument(
+        "--requests", type=int, default=10, help="number of " "requests to send"
+    )
+    parser.add_argument(
+        "--url",
+        default="restconf/operational/opendaylight-inventory:nodes",
+        help="Url to send.",
+    )
+    parser.add_argument(
+        "--nthreads",
+        type=int,
+        default=1,
+        help="Number of request worker threads, default=1",
+    )
     in_args = parser.parse_args()
 
-    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + in_args.url
+    url = "http://" + in_args.odlhost + ":" + in_args.odlport + "/" + in_args.url
 
     threads = []
     nthreads = int(in_args.nthreads)
     cond = threading.Condition()
 
     for i in range(nthreads):
-        t = threading.Thread(target=get_inventory,
-                             args=(i, url, getheaders, int(in_args.requests), cond))
+        t = threading.Thread(
+            target=get_inventory, args=(i, url, getheaders, int(in_args.requests), cond)
+        )
         threads.append(t)
         t.start()
 
@@ -139,10 +156,14 @@ if __name__ == "__main__":
             cond.wait()
             finished = finished + 1
 
-    print('\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
-                                                                      total_req_rate.value))
-    print('Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
-                                                                       total_mb_rate.value))
+    print(
+        "\nAggregate requests: %d, Aggregate requests/sec: %.2f"
+        % (total_requests.value, total_req_rate.value)
+    )
+    print(
+        "Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s"
+        % (total_mbytes.value, total_mb_rate.value)
+    )
 
 #    get_inventory(url, getheaders, int(in_args.requests))
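A minimal standalone sketch of the Condition handshake used above, assuming each worker thread ends by calling cond.notify() (the worker body sits outside this hunk); taking the condition before starting the workers avoids losing an early notification:

import threading

cond = threading.Condition()

def worker():
    with cond:
        cond.notify()

t = threading.Thread(target=worker)
with cond:
    t.start()
    cond.wait()  # lock is released while waiting, re-acquired on notify
t.join()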
 
index c1d48d2476985664eff0a78295456422a7e735f5..7fe73280d20cae1333415284e43ffe34fc9e7f80 100755 (executable)
@@ -15,9 +15,9 @@ __author__ = "Gary Wu"
 __email__ = "gary.wu1@huawei.com"
 
 
-GET_HEADERS = {'Accept': 'application/json'}
+GET_HEADERS = {"Accept": "application/json"}
 
-INVENTORY_URL = 'http://%s:%d/restconf/%s/opendaylight-inventory:nodes'
+INVENTORY_URL = "http://%s:%d/restconf/%s/opendaylight-inventory:nodes"
 
 
 class Timer(object):
@@ -67,31 +67,57 @@ def read(hosts, port, auth, datastore, print_lock, cycles, results_queue):
         stats[r.status_code] = stats.get(r.status_code, 0) + 1
 
     with print_lock:
-        print('   %s results: %s' % (threading.current_thread().name, stats))
+        print("   %s results: %s" % (threading.current_thread().name, stats))
 
     results_queue.put(stats)
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Inventory read performance test: Repeatedly read openflow node data '
-                                     'from config datastore.  Note that the data needs to be created in the datastore '
-                                     'first using flow_config_blaster.py --no-delete.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where odl controller is running (default is 127.0.0.1).  '
-                             'Specify a comma-separated list of hosts to perform round-robin load-balancing.')
-    parser.add_argument('--port', default='8181', type=int,
-                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--datastore', choices=['operational', 'config'],
-                        default='operational', help='Which data store to crawl; default operational')
-    parser.add_argument('--cycles', type=int, default=100,
-                        help='Number of repeated reads; default 100. ')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--auth', dest='auth', action='store_true', default=False,
-                        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
-                             'default: no authentication')
+    parser = argparse.ArgumentParser(
+        description="Inventory read performance test: Repeatedly read openflow node data "
+        "from config datastore.  Note that the data needs to be created in the datastore "
+        "first using flow_config_blaster.py --no-delete."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1).  "
+        "Specify a comma-separated list of hosts to perform round-robin load-balancing.",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        type=int,
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--datastore",
+        choices=["operational", "config"],
+        default="operational",
+        help="Which data store to crawl; default operational",
+    )
+    parser.add_argument(
+        "--cycles",
+        type=int,
+        default=100,
+        help="Number of repeated reads; default 100. ",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        default=False,
+        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
+        "default: no authentication",
+    )
 
     args = parser.parse_args()
 
@@ -106,8 +132,18 @@ if __name__ == "__main__":
     with Timer() as t:
         threads = []
         for i in range(args.threads):
-            thread = threading.Thread(target=read, args=(hosts, port, auth, args.datastore, print_lock, args.cycles,
-                                                         results))
+            thread = threading.Thread(
+                target=read,
+                args=(
+                    hosts,
+                    port,
+                    auth,
+                    args.datastore,
+                    print_lock,
+                    args.cycles,
+                    results,
+                ),
+            )
             threads.append(thread)
             thread.start()
 
@@ -118,6 +154,6 @@ if __name__ == "__main__":
     # Aggregate the results
     stats = functools.reduce(operator.add, map(collections.Counter, results.queue))
 
-    print('\n*** Test summary:')
-    print('    Elapsed time:    %.2fs' % t.secs)
-    print('    HTTP[OK] results:  %d\n' % stats[200])
+    print("\n*** Test summary:")
+    print("    Elapsed time:    %.2fs" % t.secs)
+    print("    HTTP[OK] results:  %d\n" % stats[200])
index 0298c0a841cfcc2f978d7ee132e43638d7cca541..320cc0b1578a13e113d36e8d676b3bb454937d98 100644 (file)
@@ -13,17 +13,13 @@ import time
 flow_template = {
     "id": "2",
     "match": {
-        "ethernet-match": {
-            "ethernet-type": {
-                "type": 2048
-            }
-        },
-        "ipv4-destination": "10.0.20.0/24"
+        "ethernet-match": {"ethernet-type": {"type": 2048}},
+        "ipv4-destination": "10.0.20.0/24",
     },
     "priority": 2,
-    "table_id": 0
+    "table_id": 0,
 }
-odl_node_url = '/restconf/config/opendaylight-inventory:nodes/node/'
+odl_node_url = "/restconf/config/opendaylight-inventory:nodes/node/"
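A hedged example of how flow_template is specialized per flow, mirroring _prepare_post below: deep-copy the template, then set the id and a /32 destination:

import copy
import netaddr

flow = copy.deepcopy(flow_template)
ip = int(netaddr.IPAddress("10.0.0.1"))
flow["id"] = ip
flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
# -> "10.0.0.1/32"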
 
 
 class Timer(object):
@@ -73,16 +69,21 @@ def _prepare_post(cntl, method, flows, template=None):
         :returns req: http request object
     """
     flow_list = []
-    for dev_id, ip in (flows):
+    for dev_id, ip in flows:
         flow = copy.deepcopy(template)
         flow["id"] = ip
-        flow["match"]["ipv4-destination"] = '%s/32' % str(netaddr.IPAddress(ip))
+        flow["match"]["ipv4-destination"] = "%s/32" % str(netaddr.IPAddress(ip))
         flow_list.append(flow)
     body = {"flow": flow_list}
-    url = 'http://' + cntl + ':8181' + odl_node_url + dev_id + '/table/0'
+    url = "http://" + cntl + ":8181" + odl_node_url + dev_id + "/table/0"
     req_data = json.dumps(body)
-    req = requests.Request(method, url, headers={'Content-Type': 'application/json'},
-                           data=req_data, auth=('admin', 'admin'))
+    req = requests.Request(
+        method,
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("admin", "admin"),
+    )
     return req
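_prepare_post returns an unprepared requests.Request; a hedged sketch of how such an object is typically sent on a Session (the actual sending loop lives in _wt_request_sender below):

import requests

ses = requests.Session()
req = requests.Request("POST", "http://127.0.0.1:8181/", auth=("admin", "admin"))
rsp = ses.send(ses.prepare_request(req))
print(rsp.status_code)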
 
 
@@ -102,14 +103,36 @@ def _prepare_delete(cntl, method, flows, template=None):
         :returns req: http request object
     """
     dev_id, flow_id = flows[0]
-    url = 'http://' + cntl + ':8181' + odl_node_url + dev_id + '/table/0/flow/' + str(flow_id)
-    req = requests.Request(method, url, headers={'Content-Type': 'application/json'},
-                           data=None, auth=('admin', 'admin'))
+    url = (
+        "http://"
+        + cntl
+        + ":8181"
+        + odl_node_url
+        + dev_id
+        + "/table/0/flow/"
+        + str(flow_id)
+    )
+    req = requests.Request(
+        method,
+        url,
+        headers={"Content-Type": "application/json"},
+        data=None,
+        auth=("admin", "admin"),
+    )
     return req
 
 
-def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, controllers=[], restport='',
-                       template=None, outqueue=None, method=None):
+def _wt_request_sender(
+    thread_id,
+    preparefnc,
+    inqueue=None,
+    exitevent=None,
+    controllers=[],
+    restport="",
+    template=None,
+    outqueue=None,
+    method=None,
+):
     """The funcion sends http requests.
 
     Runs in the working thread. It reads out flow details from the queue and sends appropriate http requests
@@ -165,34 +188,43 @@ def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, cont
     outqueue.put(res)
 
 
-def get_device_ids(controller='127.0.0.1', port=8181):
+def get_device_ids(controller="127.0.0.1", port=8181):
     """Returns a list of switch ids"""
     ids = []
-    rsp = requests.get(url='http://{0}:{1}/restconf/operational/opendaylight-inventory:nodes'
-                       .format(controller, port), auth=('admin', 'admin'))
+    rsp = requests.get(
+        url="http://{0}:{1}/restconf/operational/opendaylight-inventory:nodes".format(
+            controller, port
+        ),
+        auth=("admin", "admin"),
+    )
     if rsp.status_code != 200:
         return []
     try:
-        devices = json.loads(rsp.content)['nodes']['node']
-        ids = [d['id'] for d in devices]
+        devices = json.loads(rsp.content)["nodes"]["node"]
+        ids = [d["id"] for d in devices]
     except KeyError:
         pass
     return ids
 
 
-def get_flow_ids(controller='127.0.0.1', port=8181):
+def get_flow_ids(controller="127.0.0.1", port=8181):
     """Returns a list of flow ids"""
     ids = []
     device_ids = get_device_ids(controller, port)
     for device_id in device_ids:
-        rsp = requests.get(url='http://{0}:{1}/restconf/operational/opendaylight-inventory:nodes/node/%s/table/0'
-                           .format(controller, port) % device_id, auth=('admin', 'admin'))
+        rsp = requests.get(
+            url="http://{0}:{1}/restconf/operational/opendaylight-inventory:nodes/node/%s/table/0".format(
+                controller, port
+            )
+            % device_id,
+            auth=("admin", "admin"),
+        )
         if rsp.status_code != 200:
             return []
         try:
-            flows = json.loads(rsp.content)['flow-node-inventory:table'][0]['flow']
+            flows = json.loads(rsp.content)["flow-node-inventory:table"][0]["flow"]
             for f in flows:
-                ids.append(f['id'])
+                ids.append(f["id"])
         except KeyError:
             pass
     return ids
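Example use of the two helpers above, assuming a controller at the defaults with flows already programmed; the return values are illustrative only:

print(get_device_ids())  # e.g. ['openflow:1', 'openflow:2']
print(get_flow_ids())    # e.g. ['167772161', '167772162']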
@@ -200,28 +232,62 @@ def get_flow_ids(controller='127.0.0.1', port=8181):
 
 def main(*argv):
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where ODL controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which ODL\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flows that will be added/deleted in total, default 10')
-    parser.add_argument('--fpr', type=int, default=1,
-                        help='Number of flows per REST request, default 1')
-    parser.add_argument('--timeout', type=int, default=100,
-                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
-    parser.add_argument('--no-delete', dest='no_delete', action='store_true', default=False,
-                        help='Delete all added flows one by one, benchmark delete '
-                             'performance.')
-    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
-                        help='Delete all flows in bulk; default=False')
-    parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where ODL controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which ODL's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted in total, default 10",
+    )
+    parser.add_argument(
+        "--fpr", type=int, default=1, help="Number of flows per REST request, default 1"
+    )
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=100,
+        help="The maximum time (seconds) to wait between the add and delete cycles; default=100",
+    )
+    parser.add_argument(
+        "--no-delete",
+        dest="no_delete",
+        action="store_true",
+        default=False,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    parser.add_argument(
+        "--bulk-delete",
+        dest="bulk_delete",
+        action="store_true",
+        default=False,
+        help="Delete all flows in bulk; default=False",
+    )
+    parser.add_argument(
+        "--outfile",
+        default="",
+        help='File in which to store the add and delete flow REST API rates; default=""',
+    )
 
     in_args = parser.parse_args(*argv)
     print(in_args)
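
Because main() forwards its arguments straight to parse_args(), the test can be driven
from Python as well as from the shell; an illustrative call with arbitrary values:

    main(["--host", "127.0.0.1", "--port", "8181",
          "--threads", "5", "--flows", "1000", "--fpr", "10", "--bulk-delete"])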
@@ -230,7 +296,7 @@ def main(*argv):
     base_dev_ids = get_device_ids(controller=in_args.host)
     base_flow_ids = get_flow_ids(controller=in_args.host)
     # ip
-    ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')))
+    ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")))
     # prepare func
     preparefnc = _prepare_post
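
The Counter seeded with int(netaddr.IPAddress("10.0.0.1")) hands out consecutive integers
that convert back to dotted-quad addresses, one per generated flow. Assuming the script's
Counter behaves like a simple incrementing sequence, the idea reduces to:

    import itertools
    import netaddr

    ip_gen = itertools.count(int(netaddr.IPAddress("10.0.0.1")))
    print(netaddr.IPAddress(next(ip_gen)))  # 10.0.0.1
    print(netaddr.IPAddress(next(ip_gen)))  # 10.0.0.2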
 
@@ -266,10 +332,19 @@ def main(*argv):
     with Timer() as tmr:
         threads = []
         for i in range(int(in_args.threads)):
-            thr = threading.Thread(target=_wt_request_sender, args=(i, preparefnc),
-                                   kwargs={"inqueue": sendqueue, "exitevent": exitevent,
-                                           "controllers": [in_args.host], "restport": in_args.port,
-                                           "template": flow_template, "outqueue": resultqueue, "method": "POST"})
+            thr = threading.Thread(
+                target=_wt_request_sender,
+                args=(i, preparefnc),
+                kwargs={
+                    "inqueue": sendqueue,
+                    "exitevent": exitevent,
+                    "controllers": [in_args.host],
+                    "restport": in_args.port,
+                    "template": flow_template,
+                    "outqueue": resultqueue,
+                    "method": "POST",
+                },
+            )
             threads.append(thr)
             thr.start()
 
@@ -305,7 +380,10 @@ def main(*argv):
     if i < rounds:
         print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
+        print(
+            "... monitoring aborted after %d rounds, elapsed time %d\n\n"
+            % (rounds, t.secs)
+        )
 
     if in_args.no_delete:
         return
@@ -328,17 +406,30 @@ def main(*argv):
     preparefnc = _prepare_delete
     with Timer() as tmr:
         if in_args.bulk_delete:
-            url = 'http://' + in_args.host + ':' + '8181'
-            url += '/restconf/config/opendaylight-inventory:nodes'
-            rsp = requests.delete(url, headers={'Content-Type': 'application/json'}, auth=('admin', 'admin'))
+            url = "http://" + in_args.host + ":" + "8181"
+            url += "/restconf/config/opendaylight-inventory:nodes"
+            rsp = requests.delete(
+                url,
+                headers={"Content-Type": "application/json"},
+                auth=("admin", "admin"),
+            )
             result = {rsp.status_code: 1}
         else:
             threads = []
             for i in range(int(in_args.threads)):
-                thr = threading.Thread(target=_wt_request_sender, args=(i, preparefnc),
-                                       kwargs={"inqueue": sendqueue, "exitevent": exitevent,
-                                               "controllers": [in_args.host], "restport": in_args.port,
-                                               "template": None, "outqueue": resultqueue, "method": "DELETE"})
+                thr = threading.Thread(
+                    target=_wt_request_sender,
+                    args=(i, preparefnc),
+                    kwargs={
+                        "inqueue": sendqueue,
+                        "exitevent": exitevent,
+                        "controllers": [in_args.host],
+                        "restport": in_args.port,
+                        "template": None,
+                        "outqueue": resultqueue,
+                        "method": "DELETE",
+                    },
+                )
                 threads.append(thr)
                 thr.start()
 
@@ -373,11 +464,14 @@ def main(*argv):
     if i < rounds:
         print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
+        print(
+            "... monitoring aborted after %d rounds, elapsed time %d\n\n"
+            % (rounds, t.secs)
+        )
 
     if in_args.outfile != "":
-        addrate = add_details['flows'] / add_details['duration']
-        delrate = del_details['flows'] / del_details['duration']
+        addrate = add_details["flows"] / add_details["duration"]
+        delrate = del_details["flows"] / del_details["duration"]
         print("addrate", addrate)
         print("delrate", delrate)
 
index 06654b8ff6da075cf41f4711a0b61c2c23746f1d..456fe8a1aba185d9832cf4c203caf6e09ab4ae28 100644 (file)
@@ -16,26 +16,13 @@ flow_template = {
     "timeout": 0,
     "isPermanent": True,
     "deviceId": "of:0000000000000001",
-    "treatment": {
-        "instructions": [
-            {
-                "type": "NOACTION"
-            }
-        ],
-        "deferred": []
-    },
+    "treatment": {"instructions": [{"type": "NOACTION"}], "deferred": []},
     "selector": {
         "criteria": [
-            {
-                "type": "ETH_TYPE",
-                "ethType": 2048
-            },
-            {
-                "type": "IPV4_DST",
-                "ip": "10.0.0.0/32"
-            }
+            {"type": "ETH_TYPE", "ethType": 2048},
+            {"type": "IPV4_DST", "ip": "10.0.0.0/32"},
         ]
-    }
+    },
 }
 
 
@@ -87,13 +74,18 @@ def _prepare_post(cntl, method, flows, template=None):
     """
     fl1 = flows[0]
     dev_id, ip = fl1
-    url = 'http://' + cntl + ':' + '8181/onos/v1/flows/' + dev_id
+    url = "http://" + cntl + ":" + "8181/onos/v1/flows/" + dev_id
     flow = copy.deepcopy(template)
     flow["deviceId"] = dev_id
-    flow["selector"]["criteria"][1]["ip"] = '%s/32' % str(netaddr.IPAddress(ip))
+    flow["selector"]["criteria"][1]["ip"] = "%s/32" % str(netaddr.IPAddress(ip))
     req_data = json.dumps(flow)
-    req = requests.Request(method, url, headers={'Content-Type': 'application/json'},
-                           data=req_data, auth=('onos', 'rocks'))
+    req = requests.Request(
+        method,
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("onos", "rocks"),
+    )
     return req
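
_prepare_post returns an unprepared requests.Request; the sending side still has to
prepare it and push it through a Session. A sketch of that send step using only the
standard requests API (URL and payload are illustrative, not taken from this repo):

    import requests

    req = requests.Request(
        "POST",
        "http://127.0.0.1:8181/onos/v1/flows/of:0000000000000001",
        headers={"Content-Type": "application/json"},
        data='{"flows": []}',
        auth=("onos", "rocks"),
    )
    with requests.Session() as session:
        rsp = session.send(req.prepare(), timeout=30)
        print(rsp.status_code)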
 
 
@@ -114,13 +106,22 @@ def _prepare_delete(cntl, method, flows, template=None):
     """
     fl1 = flows[0]
     dev_id, flow_id = fl1
-    url = 'http://' + cntl + ':' + '8181/onos/v1/flows/' + dev_id + '/' + flow_id
-    req = requests.Request(method, url, auth=('onos', 'rocks'))
+    url = "http://" + cntl + ":" + "8181/onos/v1/flows/" + dev_id + "/" + flow_id
+    req = requests.Request(method, url, auth=("onos", "rocks"))
     return req
 
 
-def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, controllers=[], restport='',
-                       template=None, outqueue=None, method=None):
+def _wt_request_sender(
+    thread_id,
+    preparefnc,
+    inqueue=None,
+    exitevent=None,
+    controllers=[],
+    restport="",
+    template=None,
+    outqueue=None,
+    method=None,
+):
     """The funcion sends http requests.
 
     Runs in the working thread. It reads out flow details from the queue and sends apropriate http requests
@@ -176,47 +177,59 @@ def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, cont
     outqueue.put(res)
 
 
-def get_device_ids(controller='127.0.0.1', port=8181):
+def get_device_ids(controller="127.0.0.1", port=8181):
     """Returns a list of switch ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/devices'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/devices".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    devices = json.loads(rsp.content)['devices']
-    ids = [d['id'] for d in devices if 'of:' in d['id']]
+    devices = json.loads(rsp.content)["devices"]
+    ids = [d["id"] for d in devices if "of:" in d["id"]]
     return ids
 
 
-def get_flow_ids(controller='127.0.0.1', port=8181):
+def get_flow_ids(controller="127.0.0.1", port=8181):
     """Returns a list of flow ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    flows = json.loads(rsp.content)['flows']
-    ids = [f['id'] for f in flows]
+    flows = json.loads(rsp.content)["flows"]
+    ids = [f["id"] for f in flows]
     return ids
 
 
-def get_flow_simple_stats(controller='127.0.0.1', port=8181):
+def get_flow_simple_stats(controller="127.0.0.1", port=8181):
     """Returns a list of flow ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
     res = {}
     for f in flows:
-        if f['state'] not in res:
-            res[f['state']] = 1
+        if f["state"] not in res:
+            res[f["state"]] = 1
         else:
-            res[f['state']] += 1
+            res[f["state"]] += 1
     return res
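
The hand-rolled state histogram above is equivalent to collections.Counter; a standalone
sketch (note the stdlib Counter is distinct from the Counter class these scripts define):

    from collections import Counter

    flows = [{"state": "ADDED"}, {"state": "PENDING_ADD"}, {"state": "ADDED"}]
    print(dict(Counter(f["state"] for f in flows)))  # {'ADDED': 2, 'PENDING_ADD': 1}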
 
 
-def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
+def get_flow_device_pairs(controller="127.0.0.1", port=8181, flow_details=[]):
     """Pairing flows from controller with deteils we used ofr creation"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
     for dev_id, ip in flow_details:
         for f in flows:
             # let's identify if it is our flow
@@ -229,17 +242,22 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
                     item_idx = 1
                 else:
                     continue
-                if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
+                if f["selector"]["criteria"][item_idx]["ip"] == "%s/32" % str(
+                    netaddr.IPAddress(ip)
+                ):
                     yield dev_id, f["id"]
                     break
 
 
-def get_flow_to_remove(controller='127.0.0.1', port=8181):
+def get_flow_to_remove(controller="127.0.0.1", port=8181):
     """Pairing flows from controller with deteils we used ofr creation"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
 
     for f in flows:
         # let's identify if it is our flow
@@ -252,19 +270,27 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
         else:
             continue
         ipstr = f["selector"]["criteria"][item_idx]["ip"]
-        if '10.' in ipstr and '/32' in ipstr:
+        if "10." in ipstr and "/32" in ipstr:
             yield (f["deviceId"], f["id"])
 
 
 def main(*argv):
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where onos controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which onos\'s RESTCONF is listening (default is 8181)')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where onos controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which onos's RESTCONF is listening (default is 8181)",
+    )
 
     in_args = parser.parse_args(*argv)
     print(in_args)
@@ -273,7 +299,9 @@ def main(*argv):
     base_dev_ids = get_device_ids(controller=in_args.host)
     base_flow_ids = get_flow_ids(controller=in_args.host)
     # ip
-    ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')))  # noqa  # FIXME: This script seems to be unfinished!
+    ip_addr = Counter(
+        int(netaddr.IPAddress("10.0.0.1"))
+    )  # noqa  # FIXME: This script seems to be unfinished!
     # prepare func
     preparefnc = _prepare_post  # noqa  # FIXME: This script seems to be unfinished!
 
index c9e0d3f7b1cddbca0572c17684553a9550a5a806..f3051860519885690ad34432383bb48415490c27 100644 (file)
@@ -16,32 +16,16 @@ flow_template = {
     "timeout": 0,
     "isPermanent": True,
     "deviceId": "of:0000000000000001",
-    "treatment": {
-        "instructions": [
-            {
-                "type": "NOACTION"
-            }
-        ],
-        "deferred": []
-    },
+    "treatment": {"instructions": [{"type": "NOACTION"}], "deferred": []},
     "selector": {
         "criteria": [
-            {
-                "type": "ETH_TYPE",
-                "ethType": 2048
-            },
-            {
-                "type": "IPV4_DST",
-                "ip": "10.0.0.0/32"
-            }
+            {"type": "ETH_TYPE", "ethType": 2048},
+            {"type": "IPV4_DST", "ip": "10.0.0.0/32"},
         ]
-    }
+    },
 }
 
-flow_delete_template = {
-    "deviceId": "of:0000000000000001",
-    "flowId": 21392098393151996
-}
+flow_delete_template = {"deviceId": "of:0000000000000001", "flowId": 21392098393151996}
 
 
 class Timer(object):
@@ -91,16 +75,21 @@ def _prepare_post(cntl, method, flows, template=None):
         :returns req: http request object
     """
     flow_list = []
-    for dev_id, ip in (flows):
+    for dev_id, ip in flows:
         flow = copy.deepcopy(template)
         flow["deviceId"] = dev_id
-        flow["selector"]["criteria"][1]["ip"] = '%s/32' % str(netaddr.IPAddress(ip))
+        flow["selector"]["criteria"][1]["ip"] = "%s/32" % str(netaddr.IPAddress(ip))
         flow_list.append(flow)
     body = {"flows": flow_list}
-    url = 'http://' + cntl + ':' + '8181/onos/v1/flows/'
+    url = "http://" + cntl + ":" + "8181/onos/v1/flows/"
     req_data = json.dumps(body)
-    req = requests.Request(method, url, headers={'Content-Type': 'application/json'},
-                           data=req_data, auth=('onos', 'rocks'))
+    req = requests.Request(
+        method,
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("onos", "rocks"),
+    )
     return req
 
 
@@ -120,21 +109,35 @@ def _prepare_delete(cntl, method, flows, template=None):
         :returns req: http request object
     """
     flow_list = []
-    for dev_id, flow_id in (flows):
+    for dev_id, flow_id in flows:
         flow = copy.deepcopy(template)
         flow["deviceId"] = dev_id
         flow["flowId"] = flow_id
         flow_list.append(flow)
     body = {"flows": flow_list}
-    url = 'http://' + cntl + ':' + '8181/onos/v1/flows/'
+    url = "http://" + cntl + ":" + "8181/onos/v1/flows/"
     req_data = json.dumps(body)
-    req = requests.Request(method, url, headers={'Content-Type': 'application/json'},
-                           data=req_data, auth=('onos', 'rocks'))
+    req = requests.Request(
+        method,
+        url,
+        headers={"Content-Type": "application/json"},
+        data=req_data,
+        auth=("onos", "rocks"),
+    )
     return req
 
 
-def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, controllers=[], restport='',
-                       template=None, outqueue=None, method=None):
+def _wt_request_sender(
+    thread_id,
+    preparefnc,
+    inqueue=None,
+    exitevent=None,
+    controllers=[],
+    restport="",
+    template=None,
+    outqueue=None,
+    method=None,
+):
     """The funcion sends http requests.
 
     Runs in the working thread. It reads out flow details from the queue and sends apropriate http requests
@@ -190,47 +193,59 @@ def _wt_request_sender(thread_id, preparefnc, inqueue=None, exitevent=None, cont
     outqueue.put(res)
 
 
-def get_device_ids(controller='127.0.0.1', port=8181):
+def get_device_ids(controller="127.0.0.1", port=8181):
     """Returns a list of switch ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/devices'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/devices".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    devices = json.loads(rsp.content)['devices']
-    ids = [d['id'] for d in devices if 'of:' in d['id']]
+    devices = json.loads(rsp.content)["devices"]
+    ids = [d["id"] for d in devices if "of:" in d["id"]]
     return ids
 
 
-def get_flow_ids(controller='127.0.0.1', port=8181):
+def get_flow_ids(controller="127.0.0.1", port=8181):
     """Returns a list of flow ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    flows = json.loads(rsp.content)['flows']
-    ids = [f['id'] for f in flows]
+    flows = json.loads(rsp.content)["flows"]
+    ids = [f["id"] for f in flows]
     return ids
 
 
-def get_flow_simple_stats(controller='127.0.0.1', port=8181):
+def get_flow_simple_stats(controller="127.0.0.1", port=8181):
     """Returns a list of flow ids"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return []
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
     res = {}
     for f in flows:
-        if f['state'] not in res:
-            res[f['state']] = 1
+        if f["state"] not in res:
+            res[f["state"]] = 1
         else:
-            res[f['state']] += 1
+            res[f["state"]] += 1
     return res
 
 
-def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
+def get_flow_device_pairs(controller="127.0.0.1", port=8181, flow_details=[]):
     """Pairing flows from controller with deteils we used ofr creation"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
     for dev_id, ip in flow_details:
         for f in flows:
             # let's identify if it is our flow
@@ -243,17 +258,22 @@ def get_flow_device_pairs(controller='127.0.0.1', port=8181, flow_details=[]):
                     item_idx = 1
                 else:
                     continue
-                if f["selector"]["criteria"][item_idx]["ip"] == '%s/32' % str(netaddr.IPAddress(ip)):
+                if f["selector"]["criteria"][item_idx]["ip"] == "%s/32" % str(
+                    netaddr.IPAddress(ip)
+                ):
                     yield dev_id, f["id"]
                     break
 
 
-def get_flow_to_remove(controller='127.0.0.1', port=8181):
+def get_flow_to_remove(controller="127.0.0.1", port=8181):
     """Pairing flows from controller with deteils we used ofr creation"""
-    rsp = requests.get(url='http://{0}:{1}/onos/v1/flows'.format(controller, port), auth=('onos', 'rocks'))
+    rsp = requests.get(
+        url="http://{0}:{1}/onos/v1/flows".format(controller, port),
+        auth=("onos", "rocks"),
+    )
     if rsp.status_code != 200:
         return
-    flows = json.loads(rsp.content)['flows']
+    flows = json.loads(rsp.content)["flows"]
 
     for f in flows:
         # let's identify if it is our flow
@@ -266,34 +286,68 @@ def get_flow_to_remove(controller='127.0.0.1', port=8181):
         else:
             continue
         ipstr = f["selector"]["criteria"][item_idx]["ip"]
-        if '10.' in ipstr and '/32' in ipstr:
+        if "10." in ipstr and "/32" in ipstr:
             yield (f["deviceId"], f["id"])
 
 
 def main(*argv):
 
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where onos controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which onos\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. '
-                             'Each thread will add/delete <FLOWS> flows.')
-    parser.add_argument('--flows', type=int, default=10,
-                        help='Number of flows that will be added/deleted in total, default 10')
-    parser.add_argument('--fpr', type=int, default=1,
-                        help='Number of flows per REST request, default 1')
-    parser.add_argument('--timeout', type=int, default=100,
-                        help='The maximum time (seconds) to wait between the add and delete cycles; default=100')
-    parser.add_argument('--no-delete', dest='no_delete', action='store_true', default=False,
-                        help='Delete all added flows one by one, benchmark delete '
-                             'performance.')
-    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
-                        help='Delete all flows in bulk; default=False')
-    parser.add_argument('--outfile', default='', help='Stores add and delete flow rest api rate; default=""')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where onos controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which onos's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. "
+        "Each thread will add/delete <FLOWS> flows.",
+    )
+    parser.add_argument(
+        "--flows",
+        type=int,
+        default=10,
+        help="Number of flows that will be added/deleted in total, default 10",
+    )
+    parser.add_argument(
+        "--fpr", type=int, default=1, help="Number of flows per REST request, default 1"
+    )
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=100,
+        help="The maximum time (seconds) to wait between the add and delete cycles; default=100",
+    )
+    parser.add_argument(
+        "--no-delete",
+        dest="no_delete",
+        action="store_true",
+        default=False,
+        help="Delete all added flows one by one, benchmark delete " "performance.",
+    )
+    parser.add_argument(
+        "--bulk-delete",
+        dest="bulk_delete",
+        action="store_true",
+        default=False,
+        help="Delete all flows in bulk; default=False",
+    )
+    parser.add_argument(
+        "--outfile",
+        default="",
+        help='File in which to store the add and delete flow REST API rates; default=""',
+    )
 
     in_args = parser.parse_args(*argv)
     print(in_args)
@@ -302,7 +356,7 @@ def main(*argv):
     base_dev_ids = get_device_ids(controller=in_args.host)
     base_flow_ids = get_flow_ids(controller=in_args.host)
     # ip
-    ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')))
+    ip_addr = Counter(int(netaddr.IPAddress("10.0.0.1")))
     # prepare func
     preparefnc = _prepare_post
 
@@ -337,10 +391,19 @@ def main(*argv):
     with Timer() as tmr:
         threads = []
         for i in range(int(in_args.threads)):
-            thr = threading.Thread(target=_wt_request_sender, args=(i, preparefnc),
-                                   kwargs={"inqueue": sendqueue, "exitevent": exitevent,
-                                           "controllers": [in_args.host], "restport": in_args.port,
-                                           "template": flow_template, "outqueue": resultqueue, "method": "POST"})
+            thr = threading.Thread(
+                target=_wt_request_sender,
+                args=(i, preparefnc),
+                kwargs={
+                    "inqueue": sendqueue,
+                    "exitevent": exitevent,
+                    "controllers": [in_args.host],
+                    "restport": in_args.port,
+                    "template": flow_template,
+                    "outqueue": resultqueue,
+                    "method": "POST",
+                },
+            )
             threads.append(thr)
             thr.start()
 
@@ -369,7 +432,9 @@ def main(*argv):
             flow_stats = get_flow_simple_stats(controller=in_args.host)
             print(flow_stats)
             try:
-                pending_adds = int(flow_stats[u'PENDING_ADD'])  # noqa  # FIXME: Print this somewhere.
+                pending_adds = int(
+                    flow_stats[u"PENDING_ADD"]
+                )  # noqa  # FIXME: Print this somewhere.
             except KeyError:
                 break
             time.sleep(1)
@@ -377,7 +442,10 @@ def main(*argv):
     if i < rounds:
         print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
+        print(
+            "... monitoring aborted after %d rounds, elapsed time %d\n\n"
+            % (rounds, t.secs)
+        )
 
     if in_args.no_delete:
         return
@@ -415,11 +483,19 @@ def main(*argv):
     with Timer() as tmr:
         threads = []
         for i in range(int(in_args.threads)):
-            thr = threading.Thread(target=_wt_request_sender, args=(i, preparefnc),
-                                   kwargs={"inqueue": sendqueue, "exitevent": exitevent,
-                                           "controllers": [in_args.host], "restport": in_args.port,
-                                           "template": flow_delete_template, "outqueue": resultqueue,
-                                           "method": "DELETE"})
+            thr = threading.Thread(
+                target=_wt_request_sender,
+                args=(i, preparefnc),
+                kwargs={
+                    "inqueue": sendqueue,
+                    "exitevent": exitevent,
+                    "controllers": [in_args.host],
+                    "restport": in_args.port,
+                    "template": flow_delete_template,
+                    "outqueue": resultqueue,
+                    "method": "DELETE",
+                },
+            )
             threads.append(thr)
             thr.start()
 
@@ -447,7 +523,9 @@ def main(*argv):
             flow_stats = get_flow_simple_stats(controller=in_args.host)
             print(flow_stats)
             try:
-                pending_rems = int(flow_stats[u'PENDING_REMOVE'])  # noqa  # FIXME: Print this somewhere.
+                pending_rems = int(
+                    flow_stats[u"PENDING_REMOVE"]
+                )  # noqa  # FIXME: Print this somewhere.
             except KeyError:
                 break
             time.sleep(1)
@@ -455,11 +533,14 @@ def main(*argv):
     if i < rounds:
         print("... monitoring finished in +%d seconds\n\n" % (t.secs))
     else:
-        print("... monitoring aborted after %d rounds, elapsed time %d\n\n" % ((rounds, t.secs)))
+        print(
+            "... monitoring aborted after %d rounds, elapsed time %d\n\n"
+            % (rounds, t.secs)
+        )
 
     if in_args.outfile != "":
-        addrate = add_details['flows'] / add_details['duration']
-        delrate = del_details['flows'] / del_details['duration']
+        addrate = add_details["flows"] / add_details["duration"]
+        delrate = del_details["flows"] / del_details["duration"]
         print("addrate", addrate)
         print("delrate", delrate)
 
index bde0aa23ed9f29f304500fabe03f7d32bf35d2cd..17d8cc46ae73bd9371083f8e197ec54f19597cea 100755 (executable)
@@ -13,5 +13,5 @@ if __name__ == "__main__":
 
     data = sys.stdin.readlines()
     payload = json.loads(data.pop(0))
-    s = json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
-    print('%s\n\n' % s)
+    s = json.dumps(payload, sort_keys=True, indent=4, separators=(",", ": "))
+    print("%s\n\n" % s)
index ed62d52da2a010f816493ef6f57ce1f9d4bde463..f01331b441894d39fa0438635909489eaa35f8e9 100755 (executable)
@@ -54,7 +54,8 @@ class ShardPerformanceTester(object):
     specified when a test is started. By passing in the appropriate URLs, the test can measure data
     retrieval performance of different shards or different resources at different granularities, etc.
     """
-    headers = {'Accept': 'application/json'}
+
+    headers = {"Accept": "application/json"}
 
     def __init__(self, host, port, auth, threads, nrequests, plevel):
         """
@@ -88,7 +89,9 @@ class ShardPerformanceTester(object):
         if not self.auth:
             r = session.get(r_url, headers=self.headers, stream=False)
         else:
-            r = session.get(r_url, headers=self.headers, stream=False, auth=('admin', 'admin'))
+            r = session.get(
+                r_url, headers=self.headers, stream=False, auth=("admin", "admin")
+            )
         return r.status_code
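
send_request receives a per-thread requests.Session, so each worker reuses one TCP
connection across all of its GETs instead of reconnecting per request. The effect in
isolation (URL and credentials as used elsewhere in this script):

    import requests

    with requests.Session() as s:
        for _ in range(3):
            # keep-alive: the same connection serves all three requests
            r = s.get(
                "http://127.0.0.1:8181/restconf/operational/opendaylight-inventory:nodes",
                auth=("admin", "admin"),
            )
            print(r.status_code)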
 
     def worker(self, tid, urls):
@@ -104,7 +107,7 @@ class ShardPerformanceTester(object):
         s = requests.Session()
 
         with self.print_lock:
-            print('    Thread %d: Performing %d requests' % (tid, self.requests))
+            print("    Thread %d: Performing %d requests" % (tid, self.requests))
 
         with Timer() as t:
             for r in range(self.requests):
@@ -118,10 +121,10 @@ class ShardPerformanceTester(object):
         total_rate = sum(res.values()) / t.secs
 
         with self.print_lock:
-            print('Thread %d done:' % tid)
-            print('    Time: %.2f,' % t.secs)
-            print('    Success rate:  %.2f, Total rate: %.2f' % (ok_rate, total_rate))
-            print('    Per-thread stats: ',)
+            print("Thread %d done:" % tid)
+            print("    Time: %.2f," % t.secs)
+            print("    Success rate:  %.2f, Total rate: %.2f" % (ok_rate, total_rate))
+            print("    Per-thread stats: ")
             print(res)
             self.threads_done += 1
             self.total_rate += total_rate
@@ -160,17 +163,23 @@ class ShardPerformanceTester(object):
                     self.cond.wait()
 
         # Print summary results. Each worker prints its owns results too.
-        print('\nSummary Results:')
-        print('    Requests/sec (total_sum): %.2f' % ((self.threads * self.requests) / t.secs))
-        print('    Requests/sec (measured):  %.2f' % ((self.threads * self.requests) / t.secs))
-        print('    Time: %.2f' % t.secs)
+        print("\nSummary Results:")
+        print(
+            "    Requests/sec (total_sum): %.2f"
+            % ((self.threads * self.requests) / t.secs)
+        )
+        print("    Requests/sec (measured):  %.2f" % self.total_rate)
+        print("    Time: %.2f" % t.secs)
         self.threads_done = 0
         self.total_rate = 0
 
         if self.plevel > 0:
-            print('    Per URL Counts: ',)
+            print("    Per URL Counts: ")
             for i in range(len(urls)):
-                print('%d' % self.url_counters[i].value)
-            print('\n')
+                print("%d" % self.url_counters[i].value)
+            print("\n")
 
 
 class TestUrlGenerator(object):
@@ -191,7 +200,7 @@ class TestUrlGenerator(object):
         self.host = host
         self.port = port
         self.auth = auth
-        self.resource_string = ''
+        self.resource_string = ""
 
     def url_generator(self, data):
         """
@@ -199,7 +208,10 @@ class TestUrlGenerator(object):
         :param data: Bulk resource data (JSON) from which to generate the URLs
         :return: List of generated Resources
         """
-        print("Abstract class '%s' should never be used standalone" % (self.__class__.__name__))
+        print(
+            "Abstract class '%s' should never be used standalone"
+            % (self.__class__.__name__)
+        )
         return []
 
     def generate(self):
@@ -208,22 +220,30 @@ class TestUrlGenerator(object):
      or the entire topology) from the controller specified during __init__() and then invokes a resource-specific
          URL generator to create a set of resource-specific URLs.
         """
-        t_url = 'http://' + self.host + ":" + self.port + '/' + self.resource_string
-        headers = {'Accept': 'application/json'}
+        t_url = "http://" + self.host + ":" + self.port + "/" + self.resource_string
+        headers = {"Accept": "application/json"}
         r_url = []
 
         if not self.auth:
             r = requests.get(t_url, headers=headers, stream=False)
         else:
-            r = requests.get(t_url, headers=headers, stream=False, auth=('admin', 'admin'))
+            r = requests.get(
+                t_url, headers=headers, stream=False, auth=("admin", "admin")
+            )
 
         if r.status_code != 200:
-            print("Failed to get HTTP response from '%s', code %d" % ((t_url, r.status_code)))
+            print(
+                "Failed to get HTTP response from '%s', code %d"
+                % (t_url, r.status_code)
+            )
         else:
             try:
                 r_url = self.url_generator(json.loads(r.content))
             except Exception:
-                print("Failed to get json from '%s'. Please make sure you are connected to mininet." % (r_url))
+                print(
+                    "Failed to get json from '%s'. Please make sure you are connected to mininet."
+                    % t_url
+                )
 
         return r_url
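
TestUrlGenerator is an abstract base: subclasses set resource_string and override
url_generator, exactly as the two concrete generators below do. A stripped-down
illustration with a hypothetical resource:

    class ExampleUrlGenerator(TestUrlGenerator):
        def __init__(self, host, port, auth):
            TestUrlGenerator.__init__(self, host, port, auth)
            self.resource_string = "restconf/operational/example:data"  # hypothetical

        def url_generator(self, data):
            # one URL per top-level key; purely illustrative
            return ["http://%s:%s/%s" % (self.host, self.port, k) for k in data]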
 
@@ -236,22 +256,31 @@ class TopoUrlGenerator(TestUrlGenerator):
 
     def __init__(self, host, port, auth):
         TestUrlGenerator.__init__(self, host, port, auth)
-        self.resource_string = 'restconf/operational/network-topology:network-topology/topology/flow:1'
+        self.resource_string = (
+            "restconf/operational/network-topology:network-topology/topology/flow:1"
+        )
 
     def url_generator(self, topo_data):
         url_list = []
         try:
-            nodes = topo_data['topology'][0]['node']
+            nodes = topo_data["topology"][0]["node"]
             for node in nodes:
-                tpoints = node['termination-point']
+                tpoints = node["termination-point"]
                 for tpoint in tpoints:
-                    t_url = 'http://' + self.host + ":" + self.port + \
-                            '/restconf/operational/network-topology:network-topology/topology/flow:1/node/' + \
-                            node['node-id'] + '/termination-point/' + tpoint['tp-id']
+                    t_url = (
+                        "http://"
+                        + self.host
+                        + ":"
+                        + self.port
+                        + "/restconf/operational/network-topology:network-topology/topology/flow:1/node/"
+                        + node["node-id"]
+                        + "/termination-point/"
+                        + tpoint["tp-id"]
+                    )
                     url_list.append(t_url)
             return url_list
         except KeyError:
-            print('Error parsing topology json')
+            print("Error parsing topology json")
             return []
 
 
@@ -262,89 +291,131 @@ class InvUrlGenerator(TestUrlGenerator):
 
     def __init__(self, host, port, auth):
         TestUrlGenerator.__init__(self, host, port, auth)
-        self.resource_string = 'restconf/operational/opendaylight-inventory:nodes'
+        self.resource_string = "restconf/operational/opendaylight-inventory:nodes"
 
     def url_generator(self, inv_data):
         url_list = []
         try:
-            nodes = inv_data['nodes']['node']
+            nodes = inv_data["nodes"]["node"]
             for node in nodes:
-                nconns = node['node-connector']
+                nconns = node["node-connector"]
                 for nconn in nconns:
-                    i_url = 'http://' + self.host + ":" + self.port + \
-                            '/restconf/operational/opendaylight-inventory:nodes/node/' + \
-                            node['id'] + '/node-connector/' + nconn['id'] + \
-                            '/opendaylight-port-statistics:flow-capable-node-connector-statistics'
+                    i_url = (
+                        "http://"
+                        + self.host
+                        + ":"
+                        + self.port
+                        + "/restconf/operational/opendaylight-inventory:nodes/node/"
+                        + node["id"]
+                        + "/node-connector/"
+                        + nconn["id"]
+                        + "/opendaylight-port-statistics:flow-capable-node-connector-statistics"
+                    )
                     url_list.append(i_url)
             return url_list
         except KeyError:
-            print('Error parsing inventory json')
+            print("Error parsing inventory json")
             return []
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Flow programming performance test: First adds and then deletes flows '
-                                                 'into the config tree, as specified by optional parameters.')
-
-    parser.add_argument('--host', default='127.0.0.1',
-                        help='Host where odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--port', default='8181',
-                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
-    parser.add_argument('--auth', dest='auth', action='store_true', default=False,
-                        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
-                             'default: no authentication')
-    parser.add_argument('--threads', type=int, default=1,
-                        help='Number of request worker threads to start in each cycle; default=1. ')
-    parser.add_argument('--requests', type=int, default=100,
-                        help='Number of requests each worker thread will send to the controller; default=100.')
-    parser.add_argument('--resource', choices=['inv', 'topo', 'topo+inv', 'all'], default='both',
-                        help='Which resource to test: inventory, topology, or both; default both')
-    parser.add_argument('--plevel', type=int, default=0,
-                        help='Print level: controls output verbosity. 0-lowest, 1-highest; default 0')
+    parser = argparse.ArgumentParser(
+        description="Flow programming performance test: First adds and then deletes flows "
+        "into the config tree, as specified by optional parameters."
+    )
+
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening (default is 8181)",
+    )
+    parser.add_argument(
+        "--auth",
+        dest="auth",
+        action="store_true",
+        default=False,
+        help="Use the ODL default username/password 'admin'/'admin' to authenticate access to REST; "
+        "default: no authentication",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in each cycle; default=1. ",
+    )
+    parser.add_argument(
+        "--requests",
+        type=int,
+        default=100,
+        help="Number of requests each worker thread will send to the controller; default=100.",
+    )
+    parser.add_argument(
+        "--resource",
+        choices=["inv", "topo", "topo+inv", "all"],
+        default="both",
+        help="Which resource to test: inventory, topology, or both; default both",
+    )
+    parser.add_argument(
+        "--plevel",
+        type=int,
+        default=0,
+        help="Print level: controls output verbosity. 0-lowest, 1-highest; default 0",
+    )
     in_args = parser.parse_args()
 
     topo_urls = []
     inv_urls = []
 
     # If required, get topology resource URLs
-    if in_args.resource != 'inventory':
+    if in_args.resource != "inventory":
         tg = TopoUrlGenerator(in_args.host, in_args.port, in_args.auth)
         topo_urls += tg.generate()
         if len(topo_urls) == 0:
-            print('Failed to generate topology URLs')
+            print("Failed to generate topology URLs")
             sys.exit(-1)
 
     # If required, get inventory resource URLs
-    if in_args.resource != 'topology':
+    if in_args.resource != "topology":
         ig = InvUrlGenerator(in_args.host, in_args.port, in_args.auth)
         inv_urls += ig.generate()
         if len(inv_urls) == 0:
-            print('Failed to generate inventory URLs')
+            print("Failed to generate inventory URLs")
             sys.exit(-1)
 
-    if in_args.resource == 'topo+inv' or in_args.resource == 'all':
+    if in_args.resource == "topo+inv" or in_args.resource == "all":
         # To have balanced test results, the number of URLs for topology and inventory must be the same
         if len(topo_urls) != len(inv_urls):
             print("The number of topology and inventory URLs don't match")
             sys.exit(-1)
 
-    st = ShardPerformanceTester(in_args.host, in_args.port, in_args.auth, in_args.threads, in_args.requests,
-                                in_args.plevel)
-
-    if in_args.resource == 'all' or in_args.resource == 'topo':
-        print('===================================')
-        print('Testing topology shard performance:')
-        print('===================================')
+    st = ShardPerformanceTester(
+        in_args.host,
+        in_args.port,
+        in_args.auth,
+        in_args.threads,
+        in_args.requests,
+        in_args.plevel,
+    )
+
+    if in_args.resource == "all" or in_args.resource == "topo":
+        print("===================================")
+        print("Testing topology shard performance:")
+        print("===================================")
         st.run_test(topo_urls)
 
-    if in_args.resource == 'all' or in_args.resource == 'inv':
-        print('====================================')
-        print('Testing inventory shard performance:')
-        print('====================================')
+    if in_args.resource == "all" or in_args.resource == "inv":
+        print("====================================")
+        print("Testing inventory shard performance:")
+        print("====================================")
         st.run_test(inv_urls)
 
-    if in_args.resource == 'topo+inv' or in_args.resource == 'all':
-        print('===============================================')
-        print('Testing combined shards (topo+inv) performance:')
-        print('===============================================')
+    if in_args.resource == "topo+inv" or in_args.resource == "all":
+        print("===============================================")
+        print("Testing combined shards (topo+inv) performance:")
+        print("===============================================")
         st.run_test(topo_urls + inv_urls)
index 5f4973ba79329f68e3d6df9b0040da7da00bd277..6845ff8a25fa076498bad9e3ee1050a088dbf121 100644 (file)
@@ -21,42 +21,65 @@ def main():
     """
 
     # Constants
-    car_entry_template = string.Template('''      {
+    car_entry_template = string.Template(
+        """      {
        "id": "car-$NUM"
-      }''')
+      }"""
+    )
 
-    patch_data_template = string.Template('''{
+    patch_data_template = string.Template(
+        """{
  "cars": {
   "car-entry": [
 $ENTRIES
      ]
     }
-}''')
+}"""
+    )
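
string.Template fills the $NUM and $ENTRIES placeholders via substitute(); a quick
demonstration with illustrative values:

    import string

    entry_tpl = string.Template('      {\n       "id": "car-$NUM"\n      }')
    entries = ",\n".join(entry_tpl.substitute(NUM=n) for n in (1, 2))
    print(entries)  # two car-entry objects, separated by a comma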
 
     # Arguments
-    parser = argparse.ArgumentParser(description="Config datastore"
-                                                 "scale test script")
-    parser.add_argument("--host", default="127.0.0.1",
-                        help="Host where odl controller is running."
-                             "(default: 127.0.0.1)")
-    parser.add_argument("--port", default="8181",
-                        help="Port on which odl's RESTCONF is listening"
-                             "(default: 8181)")
-    parser.add_argument("--start-id", type=int, default=1,
-                        help="ID number of the first car. (default:1)")
-    parser.add_argument("--segment-size", type=int, default=1,
-                        help="Number of cars in segment. (default:1)")
-    parser.add_argument("--iterations", type=int, default=1,
-                        help="How many times the segment sent. (default:1)")
-    parser.add_argument("--move-per-iter", type=int, default=1,
-                        help="Each segment has IDs moved by this. (default:1)")
+    parser = argparse.ArgumentParser(description="Config datastore" "scale test script")
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running." "(default: 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening" "(default: 8181)",
+    )
+    parser.add_argument(
+        "--start-id",
+        type=int,
+        default=1,
+        help="ID number of the first car. (default:1)",
+    )
+    parser.add_argument(
+        "--segment-size",
+        type=int,
+        default=1,
+        help="Number of cars in segment. (default:1)",
+    )
+    parser.add_argument(
+        "--iterations",
+        type=int,
+        default=1,
+        help="How many times the segment sent. (default:1)",
+    )
+    parser.add_argument(
+        "--move-per-iter",
+        type=int,
+        default=1,
+        help="Each segment has IDs moved by this. (default:1)",
+    )
     parser.add_argument("--user", help="Restconf user name", default="admin")
     parser.add_argument("--password", help="Restconf password", default="admin")
 
     args = parser.parse_args()
 
     # Logic
-    url = "http://" + args.host + ':' + args.port + "/restconf/config/car:cars"
+    url = "http://" + args.host + ":" + args.port + "/restconf/config/car:cars"
     auth = (args.user, args.password)
     headers = {"Content-Type": "application/json"}
     session = requests.Session()
index 3731d0c516bb9ee0d12be815715d5be959d97297..52d717053af89ea76248496a6cfb5421009c33a0 100644 (file)
@@ -19,7 +19,7 @@ _template_add_car = {
             "category": "my_category",
             "model": "to be replaced",
             "manufacturer": "my_manufacturer",
-            "year": "2015"
+            "year": "2015",
         }
     ]
 }
@@ -31,7 +31,7 @@ _template_add_people_rpc = {
             "people:gender": "male",
             "people:age": "99",
             "people:address": "to be replaced",
-            "people:contactNo": "to be replaced"
+            "people:contactNo": "to be replaced",
         }
     ]
 }
@@ -40,7 +40,7 @@ _template_add_cp_rpc = {
     "input": {
         "car-purchase:person": "to be replaced",
         "car-purchase:person-id": "to be replaced",
-        "car-purchase:car-id": "to be replaced"
+        "car-purchase:car-id": "to be replaced",
     }
 }
 
@@ -161,7 +161,9 @@ def _prepare_add_car_people_rpc(odl_ip, port, item_list, auth):
     container = {"input": {}}
     item = item_list[0]
     entry = container["input"]
-    entry["car-purchase:person"] = "/people:people/people:person[people:id='" + str(item) + "']"
+    entry["car-purchase:person"] = (
+        "/people:people/people:person[people:id='" + str(item) + "']"
+    )
     entry["car-purchase:person-id"] = str(item)
     entry["car-purchase:car-id"] = str(item)
     container["input"] = entry
@@ -169,9 +171,19 @@ def _prepare_add_car_people_rpc(odl_ip, port, item_list, auth):
     return req
 
 
-def _request_sender(thread_id, preparing_function, auth, in_queue=None,
-                    exit_event=None, odl_ip="127.0.0.1", port="8181", out_queue=None,
-                    req_timeout=60, retry_timeout=15, retry_rcs=[]):
+def _request_sender(
+    thread_id,
+    preparing_function,
+    auth,
+    in_queue=None,
+    exit_event=None,
+    odl_ip="127.0.0.1",
+    port="8181",
+    out_queue=None,
+    req_timeout=60,
+    retry_timeout=15,
+    retry_rcs=[],
+):
     """The funcion sends http requests.
 
     Runs in the working thread. It reads out flow details from the queue and
@@ -227,7 +239,12 @@ def _request_sender(thread_id, preparing_function, auth, in_queue=None,
                 counter[rsp.status_code] += 1
                 rc = rsp.status_code
                 lvl = logging.INFO if rc > 299 else logging.DEBUG
-                logger.log(lvl, "Request started at {} finished with following detais".format(time.ctime(start_time)))
+                logger.log(
+                    lvl,
+                    "Request started at {} finished with following detais".format(
+                        time.ctime(start_time)
+                    ),
+                )
                 logger.log(lvl, "%s %s", rsp.request, rsp.request.url)
                 logger.log(lvl, "Headers %s:", rsp.request.headers)
                 logger.log(lvl, "Body: %s", rsp.request.body)
@@ -244,9 +261,18 @@ def _request_sender(thread_id, preparing_function, auth, in_queue=None,
     logger.info("Response code(s) got per number of requests: %s", responses)
 
 
-def _task_executor(preparing_function, odl_ip="127.0.0.1", port="8181",
-                   thread_count=1, item_count=1, items_per_request=1,
-                   auth=('admin', 'admin'), req_timeout=600, retry_timeout=15, retry_rcs=[]):
+def _task_executor(
+    preparing_function,
+    odl_ip="127.0.0.1",
+    port="8181",
+    thread_count=1,
+    item_count=1,
+    items_per_request=1,
+    auth=("admin", "admin"),
+    req_timeout=600,
+    retry_timeout=15,
+    retry_rcs=[],
+):
     """The main function which drives sending of http requests.
 
     Creates 2 queues and requested number of "working threads".
@@ -282,13 +308,13 @@ def _task_executor(preparing_function, odl_ip="127.0.0.1", port="8181",
     """
 
     # getting hosts
-    hosts = odl_ip.split(',')
+    hosts = odl_ip.split(",")
     nrhosts = len(hosts)
 
     items = [i + 1 for i in range(item_count)]
     item_groups = []
     for i in range(0, item_count, items_per_request):
-        item_groups.append(items[i:i + items_per_request])
+        item_groups.append(items[i : i + items_per_request])
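
The loop above splits the item list into fixed-size groups, one group per request; the
same chunking in isolation:

    items = list(range(1, 8))
    per_request = 3
    groups = [items[i : i + per_request] for i in range(0, len(items), per_request)]
    print(groups)  # [[1, 2, 3], [4, 5, 6], [7]]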
 
     # fill the queue with details needed for one http requests
     send_queue = Queue.Queue()
@@ -303,12 +329,20 @@ def _task_executor(preparing_function, odl_ip="127.0.0.1", port="8181",
     # start threads to read details from queues and to send http requests
     threads = []
     for i in range(int(thread_count)):
-        thr = threading.Thread(target=_request_sender,
-                               args=(i, preparing_function, auth),
-                               kwargs={"in_queue": send_queue, "exit_event": exit_event,
-                                       "odl_ip": hosts[i % nrhosts], "port": port,
-                                       "out_queue": result_queue, "req_timeout": req_timeout,
-                                       "retry_timeout": retry_timeout, "retry_rcs": retry_rcs})
+        thr = threading.Thread(
+            target=_request_sender,
+            args=(i, preparing_function, auth),
+            kwargs={
+                "in_queue": send_queue,
+                "exit_event": exit_event,
+                "odl_ip": hosts[i % nrhosts],
+                "port": port,
+                "out_queue": result_queue,
+                "req_timeout": req_timeout,
+                "retry_timeout": retry_timeout,
+                "retry_rcs": retry_rcs,
+            },
+        )
         threads.append(thr)
         thr.start()
 
@@ -556,17 +590,30 @@ def add_car(odl_ip, port, thread_count, item_count, auth, items_per_request):
         None
     """
 
-    logger.info("Add %s car(s) to %s:%s (%s per request)",
-                item_count, odl_ip, port, items_per_request)
-    res = _task_executor(_prepare_add_car, odl_ip=odl_ip, port=port,
-                         thread_count=thread_count, item_count=item_count,
-                         items_per_request=items_per_request, auth=auth)
+    logger.info(
+        "Add %s car(s) to %s:%s (%s per request)",
+        item_count,
+        odl_ip,
+        port,
+        items_per_request,
+    )
+    res = _task_executor(
+        _prepare_add_car,
+        odl_ip=odl_ip,
+        port=port,
+        thread_count=thread_count,
+        item_count=item_count,
+        items_per_request=items_per_request,
+        auth=auth,
+    )
     if res.keys() != [204]:
         logger.error("Not all cars were configured: " + repr(res))
         raise Exception("Not all cars were configured: " + repr(res))
 
 
-def add_car_with_retries(odl_ip, port, thread_count, item_count, auth, items_per_request):
+def add_car_with_retries(
+    odl_ip, port, thread_count, item_count, auth, items_per_request
+):
     """Configure car entries to the config datastore.
 
     Args:
@@ -587,18 +634,33 @@ def add_car_with_retries(odl_ip, port, thread_count, item_count, auth, items_per
         None
     """
 
-    logger.info("Add %s car(s) to %s:%s (%s per request)",
-                item_count, odl_ip, port, items_per_request)
+    logger.info(
+        "Add %s car(s) to %s:%s (%s per request)",
+        item_count,
+        odl_ip,
+        port,
+        items_per_request,
+    )
     retry_rcs = [401, 404, 500, 503]
-    res = _task_executor(_prepare_add_car, odl_ip=odl_ip, port=port,
-                         thread_count=thread_count, item_count=item_count,
-                         items_per_request=items_per_request, auth=auth,
-                         req_timeout=15, retry_timeout=30, retry_rcs=retry_rcs)
+    res = _task_executor(
+        _prepare_add_car,
+        odl_ip=odl_ip,
+        port=port,
+        thread_count=thread_count,
+        item_count=item_count,
+        items_per_request=items_per_request,
+        auth=auth,
+        req_timeout=15,
+        retry_timeout=30,
+        retry_rcs=retry_rcs,
+    )
     acceptable_rcs = [204] + retry_rcs
     for key in res.keys():
         if key not in acceptable_rcs:
             logger.error("Problems during cars' configuration appeared: " + repr(res))
-            raise Exception("Problems during cars' configuration appeared: " + repr(res))
+            raise Exception(
+                "Problems during cars' configuration appeared: " + repr(res)
+            )
 
 
 def add_people_rpc(odl_ip, port, thread_count, item_count, auth, items_per_request):
@@ -622,23 +684,37 @@ def add_people_rpc(odl_ip, port, thread_count, item_count, auth, items_per_reque
         None
     """
 
-    logger.info("Add %s people to %s:%s (%s per request)",
-                item_count, odl_ip, port, items_per_request)
+    logger.info(
+        "Add %s people to %s:%s (%s per request)",
+        item_count,
+        odl_ip,
+        port,
+        items_per_request,
+    )
     if items_per_request != 1:
-        logger.error("Only 1 item per request is supported, " +
-                     "you specified: {0}".format(item_count))
-        raise NotImplementedError("Only 1 item per request is supported, " +
-                                  "you specified: {0}".format(item_count))
-    res = _task_executor(_prepare_add_people_rpc, odl_ip=odl_ip, port=port,
-                         thread_count=thread_count, item_count=item_count,
-                         items_per_request=items_per_request, auth=auth)
+        logger.error(
+            "Only 1 item per request is supported, "
+            + "you specified: {0}".format(item_count)
+        )
+        raise NotImplementedError(
+            "Only 1 item per request is supported, "
+            + "you specified: {0}".format(item_count)
+        )
+    res = _task_executor(
+        _prepare_add_people_rpc,
+        odl_ip=odl_ip,
+        port=port,
+        thread_count=thread_count,
+        item_count=item_count,
+        items_per_request=items_per_request,
+        auth=auth,
+    )
     if res.keys() != [200]:
         logger.error("Not all people were configured: " + repr(res))
         raise Exception("Not all people were configured: " + repr(res))
 
 
-def add_car_people_rpc(odl_ip, port, thread_count, item_count, auth,
-                       items_per_request):
+def add_car_people_rpc(odl_ip, port, thread_count, item_count, auth, items_per_request):
     """Configure car-people entries to the config datastore one by one using rpc
 
     Args:
@@ -659,17 +735,32 @@ def add_car_people_rpc(odl_ip, port, thread_count, item_count, auth,
         None
     """
 
-    logger.info("Add %s purchase(s) to %s:%s (%s per request)",
-                item_count, odl_ip, port, items_per_request)
+    logger.info(
+        "Add %s purchase(s) to %s:%s (%s per request)",
+        item_count,
+        odl_ip,
+        port,
+        items_per_request,
+    )
     if items_per_request != 1:
-        logger.error("Only 1 item per request is supported, " +
-                     "you specified: {0}".format(item_count))
-        raise NotImplementedError("Only 1 item per request is supported, " +
-                                  "you specified: {0}".format(item_count))
-
-    res = _task_executor(_prepare_add_car_people_rpc, odl_ip=odl_ip, port=port,
-                         thread_count=thread_count, item_count=item_count,
-                         items_per_request=items_per_request, auth=auth)
+        logger.error(
+            "Only 1 item per request is supported, "
+            + "you specified: {0}".format(item_count)
+        )
+        raise NotImplementedError(
+            "Only 1 item per request is supported, "
+            + "you specified: {0}".format(item_count)
+        )
+
+    res = _task_executor(
+        _prepare_add_car_people_rpc,
+        odl_ip=odl_ip,
+        port=port,
+        thread_count=thread_count,
+        item_count=item_count,
+        items_per_request=items_per_request,
+        auth=auth,
+    )
     if res.keys() != [200]:
         logger.error("Not all rpc calls passed: " + repr(res))
         raise Exception("Not all rpc calls passed: " + repr(res))
@@ -681,7 +772,11 @@ _items = ["car", "people", "car-people"]
 _handler_matrix = {
     "add": {"car": add_car},
     "get": {"car": get_car, "people": get_people, "car-people": get_car_people},
-    "delete": {"car": delete_car, "people": delete_people, "car-people": delete_car_people},
+    "delete": {
+        "car": delete_car,
+        "people": delete_people,
+        "car-people": delete_car_people,
+    },
     "add-rpc": {"car-people": add_car_people_rpc, "people": add_people_rpc},
     "add-with-retries": {"car": add_car_with_retries},
 }
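
_handler_matrix is a two-level dispatch table: the first key is the CLI action, the second the item type, and the value the handler to invoke. A toy version of the same lookup shape, with a stand-in handler rather than the real ones:

def add_demo(*args):
    return "added"

matrix = {"add": {"car": add_demo}}
action, item = "add", "car"
if action in matrix and item in matrix[action]:
    print(matrix[action][item]())  # added
else:
    print("Unsupported combination")
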
@@ -694,38 +789,56 @@ if __name__ == "__main__":
     It provides "car", "people" and "car-people" crud operations.
     """
 
-    parser = argparse.ArgumentParser(description="Cluster datastore"
-                                                 "performance test script")
-    parser.add_argument("--host", default="127.0.0.1",
-                        help="Host where odl controller is running."
-                             "Or comma separated list of hosts."
-                             "(default is 127.0.0.1)")
-    parser.add_argument("--port", default="8181",
-                        help="Port on which odl's RESTCONF is listening"
-                             "(default is 8181)")
-    parser.add_argument("--threads", type=int, default=1,
-                        help="Number of request worker threads to start in"
-                             "each cycle (default=1)")
-    parser.add_argument("action", choices=_actions, metavar="action",
-                        help="Action to be performed.")
-    parser.add_argument("--itemtype", choices=_items, default="car",
-                        help="Flows-per-Request - number of flows (batch size)"
-                             "sent in each HTTP request (default 1)")
-    parser.add_argument("--itemcount", type=int, help="Items per request",
-                        default=1)
+    parser = argparse.ArgumentParser(
+        description="Cluster datastore" "performance test script"
+    )
+    parser.add_argument(
+        "--host",
+        default="127.0.0.1",
+        help="Host where odl controller is running."
+        "Or comma separated list of hosts."
+        "(default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--port",
+        default="8181",
+        help="Port on which odl's RESTCONF is listening" "(default is 8181)",
+    )
+    parser.add_argument(
+        "--threads",
+        type=int,
+        default=1,
+        help="Number of request worker threads to start in" "each cycle (default=1)",
+    )
+    parser.add_argument(
+        "action", choices=_actions, metavar="action", help="Action to be performed."
+    )
+    parser.add_argument(
+        "--itemtype",
+        choices=_items,
+        default="car",
+        help="Flows-per-Request - number of flows (batch size)"
+        "sent in each HTTP request (default 1)",
+    )
+    parser.add_argument("--itemcount", type=int, help="Items per request", default=1)
     parser.add_argument("--user", help="Restconf user name", default="admin")
     parser.add_argument("--password", help="Restconf password", default="admin")
     parser.add_argument("--ipr", type=int, help="Items per request", default=1)
-    parser.add_argument("--debug", dest="loglevel", action="store_const",
-                        const=logging.DEBUG, default=logging.INFO,
-                        help="Set log level to debug (default is error)")
+    parser.add_argument(
+        "--debug",
+        dest="loglevel",
+        action="store_const",
+        const=logging.DEBUG,
+        default=logging.INFO,
+        help="Set log level to debug (default is error)",
+    )
 
     args = parser.parse_args()
 
     logger = logging.getLogger("logger")
-    log_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+    log_formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
     console_handler = logging.StreamHandler()
-    file_handler = logging.FileHandler('cluster_rest_script.log', mode="w")
+    file_handler = logging.FileHandler("cluster_rest_script.log", mode="w")
     console_handler.setFormatter(log_formatter)
     file_handler.setFormatter(log_formatter)
     logger.addHandler(console_handler)
@@ -734,8 +847,10 @@ if __name__ == "__main__":
 
     auth = (args.user, args.password)
 
-    if (args.action not in _handler_matrix or
-            args.itemtype not in _handler_matrix[args.action]):
+    if (
+        args.action not in _handler_matrix
+        or args.itemtype not in _handler_matrix[args.action]
+    ):
         msg = "Unsupported combination of action: " + str(args.action)
         msg += " and item: " + str(args.itemtype)
         logger.error(msg)
@@ -746,5 +861,4 @@ if __name__ == "__main__":
     # this should be done inside handler functions
 
     handler_function = _handler_matrix[args.action][args.itemtype]
-    handler_function(args.host, args.port, args.threads,
-                     args.itemcount, auth, args.ipr)
+    handler_function(args.host, args.port, args.threads, args.itemcount, auth, args.ipr)
index 24a0fdc77e5bcc9b6e4e82ecf1e9d44f643ebf14..639bfaa54185b68511ceedb3e273c3c776de76c1 100644 (file)
@@ -5,7 +5,7 @@ import argparse
 import logging
 import requests
 
-__author__ = 'Marcus Williams'
+__author__ = "Marcus Williams"
 __copyright__ = "Copyright (c) 2015, Intel Corp Inc., Cisco Systems Inc. and others"
 __credits__ = ["Jan Medved, Lori Jakab"]
 __license__ = "New-style BSD"
@@ -13,27 +13,35 @@ __email__ = "marcus.williams@intel.com"
 __version__ = "0.0.1"
 
 
-class OvsdbConfigBlaster (object):
-    PUT_HEADERS = {'Content-Type': 'application/json',
-                   'Authorization': 'Basic YWRtaW46YWRtaW4=',
-                   'Accept': 'application/json'}
-    GET_HEADERS = {'Accept': 'application/json',
-                   'Authorization': 'Basic YWRtaW46YWRtaW4='}
-    DELETE_HEADERS = {'Accept': 'application/json',
-                      'Authorization': 'Basic YWRtaW46YWRtaW4='}
+class OvsdbConfigBlaster(object):
+    PUT_HEADERS = {
+        "Content-Type": "application/json",
+        "Authorization": "Basic YWRtaW46YWRtaW4=",
+        "Accept": "application/json",
+    }
+    GET_HEADERS = {
+        "Accept": "application/json",
+        "Authorization": "Basic YWRtaW46YWRtaW4=",
+    }
+    DELETE_HEADERS = {
+        "Accept": "application/json",
+        "Authorization": "Basic YWRtaW46YWRtaW4=",
+    }
     TIMEOUT = 10
 
-    def __init__(self,
-                 controller_ip,
-                 controller_port,
-                 vswitch_ip,
-                 vswitch_ovsdb_port,
-                 vswitch_remote_ip,
-                 vswitch_remote_ovsdb_port,
-                 vswitch_port_type,
-                 vswitch_lst_del_br,
-                 delete_ports,
-                 num_instances):
+    def __init__(
+        self,
+        controller_ip,
+        controller_port,
+        vswitch_ip,
+        vswitch_ovsdb_port,
+        vswitch_remote_ip,
+        vswitch_remote_ovsdb_port,
+        vswitch_port_type,
+        vswitch_lst_del_br,
+        delete_ports,
+        num_instances,
+    ):
         """
         Args:
             :param controller_ip: The ODL host ip used to send RPCs
@@ -53,21 +61,19 @@ class OvsdbConfigBlaster (object):
         self.controller_port = controller_port
         self.vswitch_dict = dict()
         self.add_vswitch_to_dict(
-            vswitch_ip,
-            vswitch_remote_ip,
-            vswitch_ovsdb_port, 'ovs-1')
+            vswitch_ip, vswitch_remote_ip, vswitch_ovsdb_port, "ovs-1"
+        )
         if vswitch_remote_ip:
             self.add_vswitch_to_dict(
-                vswitch_remote_ip,
-                vswitch_ip,
-                vswitch_remote_ovsdb_port, 'ovs-2')
+                vswitch_remote_ip, vswitch_ip, vswitch_remote_ovsdb_port, "ovs-2"
+            )
         self.vswitch_port_type = vswitch_port_type
         self.vswitch_lst_del_br = vswitch_lst_del_br
         self.num_instances = num_instances
         self.delete_ports = delete_ports
-        self.connect_vswitch(self.vswitch_dict['ovs-1'])
-        if self.vswitch_dict.get('ovs-2'):
-            self.connect_vswitch(self.vswitch_dict['ovs-2'])
+        self.connect_vswitch(self.vswitch_dict["ovs-1"])
+        if self.vswitch_dict.get("ovs-2"):
+            self.connect_vswitch(self.vswitch_dict["ovs-2"])
 
     @staticmethod
     def return_ovsdb_url(vswitch_ip, vswitch_ovsdb_port, url_type="config"):
@@ -79,19 +85,19 @@ class OvsdbConfigBlaster (object):
         """
         url_prefix = None
         if url_type == "config":
-            url_prefix = 'restconf/config/'
+            url_prefix = "restconf/config/"
         elif url_type == "oper":
-            url_prefix = 'restconf/operational/'
-        ovsdb_url = url_prefix \
-            + 'network-topology:' \
-            'network-topology/topology/' \
-            'ovsdb:1/node/ovsdb:%2F%2F' \
-            + vswitch_ip\
-            + ':' \
-            + vswitch_ovsdb_port
+            url_prefix = "restconf/operational/"
+        ovsdb_url = (
+            url_prefix + "network-topology:"
+            "network-topology/topology/"
+            "ovsdb:1/node/ovsdb:%2F%2F" + vswitch_ip + ":" + vswitch_ovsdb_port
+        )
         return ovsdb_url
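
A worked example of the value assembled above, using placeholder address and port ("%2F%2F" is a URL-encoded "//"):

vswitch_ip, vswitch_ovsdb_port = "192.0.2.5", "6640"  # placeholders
ovsdb_url = (
    "restconf/config/" + "network-topology:"
    "network-topology/topology/"
    "ovsdb:1/node/ovsdb:%2F%2F" + vswitch_ip + ":" + vswitch_ovsdb_port
)
print(ovsdb_url)
# restconf/config/network-topology:network-topology/topology/ovsdb:1/node/ovsdb:%2F%2F192.0.2.5:6640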
 
-    def add_vswitch_to_dict(self, vswitch_ip, vswitch_remote_ip, vswitch_ovsdb_port, vswitch_name):
+    def add_vswitch_to_dict(
+        self, vswitch_ip, vswitch_remote_ip, vswitch_ovsdb_port, vswitch_name
+    ):
         """ Add details of an Open vSwitch instance to self.vswitch_dict
         Args:
             :param vswitch_ip: The ip of Open vSwitch to use
@@ -99,32 +105,30 @@ class OvsdbConfigBlaster (object):
             :param vswitch_ovsdb_port: The ovsdb port of Open vSwitch to use
             :param vswitch_name: The name to label the added Open vSwitch instance
         """
-        urlprefix = 'http://' \
-                    + self.controller_ip + \
-                    ':' \
-                    + self.controller_port + \
-                    '/'
-        self.vswitch_dict.update({
-            vswitch_name: {
-                'name': vswitch_name,
-                'ip': vswitch_ip,
-                'remote-ip': vswitch_remote_ip,
-                'ovsdb-port': vswitch_ovsdb_port,
-                'node-id': 'ovsdb://%s:%s'
-                           % (vswitch_ip,
-                              vswitch_ovsdb_port),
-                'post-url': urlprefix
-                + OvsdbConfigBlaster.return_ovsdb_url(
-                    vswitch_ip,
-                    vswitch_ovsdb_port),
-                'get-config-url': urlprefix
-                + OvsdbConfigBlaster.return_ovsdb_url(
-                    vswitch_ip,
-                    vswitch_ovsdb_port),
-                'get-oper-url': urlprefix
-                + OvsdbConfigBlaster.return_ovsdb_url(
-                    vswitch_ip,
-                    vswitch_ovsdb_port)}})
+        urlprefix = "http://" + self.controller_ip + ":" + self.controller_port + "/"
+        self.vswitch_dict.update(
+            {
+                vswitch_name: {
+                    "name": vswitch_name,
+                    "ip": vswitch_ip,
+                    "remote-ip": vswitch_remote_ip,
+                    "ovsdb-port": vswitch_ovsdb_port,
+                    "node-id": "ovsdb://%s:%s" % (vswitch_ip, vswitch_ovsdb_port),
+                    "post-url": urlprefix
+                    + OvsdbConfigBlaster.return_ovsdb_url(
+                        vswitch_ip, vswitch_ovsdb_port
+                    ),
+                    "get-config-url": urlprefix
+                    + OvsdbConfigBlaster.return_ovsdb_url(
+                        vswitch_ip, vswitch_ovsdb_port
+                    ),
+                    "get-oper-url": urlprefix
+                    + OvsdbConfigBlaster.return_ovsdb_url(
+                        vswitch_ip, vswitch_ovsdb_port
+                    ),
+                }
+            }
+        )
 
     def connect_vswitch(self, vswitch_dict):
         """ Connect ODL to an Open vSwitch instance using restconf
@@ -133,21 +137,19 @@ class OvsdbConfigBlaster (object):
                                  an instance of Open vSwitch
         """
         connect_ovs_body = {
-            u'network-topology:node': [
+            u"network-topology:node": [
                 {
-                    u'node-id': unicode(vswitch_dict['node-id']),
-                    u'connection-info': {
-                        u'ovsdb:remote-port': unicode(vswitch_dict['ovsdb-port']),
-                        u'ovsdb:remote-ip': unicode(vswitch_dict['ip'])
-                    }
+                    u"node-id": unicode(vswitch_dict["node-id"]),
+                    u"connection-info": {
+                        u"ovsdb:remote-port": unicode(vswitch_dict["ovsdb-port"]),
+                        u"ovsdb:remote-ip": unicode(vswitch_dict["ip"]),
+                    },
                 }
             ]
         }
-        self.send_rest(self.session,
-                       vswitch_dict['post-url'],
-                       connect_ovs_body)
+        self.send_rest(self.session, vswitch_dict["post-url"], connect_ovs_body)
 
-    def add_bridge(self, num_instances, vswitch_name='ovs-1'):
+    def add_bridge(self, num_instances, vswitch_name="ovs-1"):
         """Add num_instances of bridge to ODL config
         Args:
             :param num_instances: Number of bridges to create
@@ -156,42 +158,43 @@ class OvsdbConfigBlaster (object):
         """
 
         for i in range(num_instances):
-            bridge_name = unicode('br-' + str(i) + '-test')
+            bridge_name = unicode("br-" + str(i) + "-test")
             add_bridge_body = {
                 u"network-topology:node": [
                     {
                         u"node-id": u"%s/bridge/%s"
-                                    % (unicode(self.vswitch_dict[vswitch_name]
-                                               .get('node-id')),
-                                       unicode(bridge_name)),
+                        % (
+                            unicode(self.vswitch_dict[vswitch_name].get("node-id")),
+                            unicode(bridge_name),
+                        ),
                         u"ovsdb:bridge-name": unicode(bridge_name),
                         u"ovsdb:datapath-id": u"00:00:b2:bf:48:25:f2:4b",
                         u"ovsdb:protocol-entry": [
-                            {
-                                u"protocol":
-                                    u"ovsdb:ovsdb-bridge-protocol-openflow-13"
-                            }],
+                            {u"protocol": u"ovsdb:ovsdb-bridge-protocol-openflow-13"}
+                        ],
                         u"ovsdb:controller-entry": [
                             {
-                                u"target": u"tcp:%s:%s" % (self.controller_ip, self.controller_port)
-                            }],
+                                u"target": u"tcp:%s:%s"
+                                % (self.controller_ip, self.controller_port)
+                            }
+                        ],
                         u"ovsdb:managed-by": u"/network-topology:network-topology/"
-                                             u"network-topology:topology"
-                                             u"[network-topology:topology-id"
-                                             u"='ovsdb:1']/network-topology:node"
-                                             u"[network-topology:node-id="
-                                             u"'%s']"
-                                             % unicode(self.vswitch_dict[vswitch_name]
-                                                           .get('node-id'))
+                        u"network-topology:topology"
+                        u"[network-topology:topology-id"
+                        u"='ovsdb:1']/network-topology:node"
+                        u"[network-topology:node-id="
+                        u"'%s']"
+                        % unicode(self.vswitch_dict[vswitch_name].get("node-id")),
                     }
                 ]
             }
-            self.send_rest(self.session,
-                           self.vswitch_dict[vswitch_name]
-                           .get('post-url')
-                           + '%2Fbridge%2F'
-                           + bridge_name,
-                           add_bridge_body)
+            self.send_rest(
+                self.session,
+                self.vswitch_dict[vswitch_name].get("post-url")
+                + "%2Fbridge%2F"
+                + bridge_name,
+                add_bridge_body,
+            )
         self.session.close()
 
     def add_port(self, port_type="ovsdb:interface-type-vxlan"):
@@ -200,54 +203,58 @@ class OvsdbConfigBlaster (object):
             :param port_type: The type of port to create
                                 default: 'ovsdb:interface-type-vxlan'
         """
-        bridge_name = 'br-0-test'
-        self.add_bridge(1, 'ovs-1')
-#        self.add_bridge(1, 'ovs-2')
+        bridge_name = "br-0-test"
+        self.add_bridge(1, "ovs-1")
+        #        self.add_bridge(1, 'ovs-2')
 
         for instance in range(self.num_instances):
             for vswitch in self.vswitch_dict.itervalues():
                 if port_type == "ovsdb:interface-type-vxlan":
                     port_prefix = "tp-"
-                    ovsdb_rest_url = vswitch.get('post-url') \
-                        + '%2Fbridge%2F'\
-                        + bridge_name\
-                        + '/termination-point/'
-                    body_name = 'tp-body'
+                    ovsdb_rest_url = (
+                        vswitch.get("post-url")
+                        + "%2Fbridge%2F"
+                        + bridge_name
+                        + "/termination-point/"
+                    )
+                    body_name = "tp-body"
                 else:
                     port_prefix = "port-"
-                    ovsdb_rest_url = vswitch.get('post-url') \
-                        + '%2Fbridge%2F' \
-                        + bridge_name\
-                        + '/port/'
-                    body_name = 'port-body'
-                port_name = port_prefix + str(instance) + '-test-' + vswitch.get('ip')
-                body = {'tp-body': {
-                    u"network-topology:termination-point": [
-                        {
-                            u"ovsdb:options": [
-                                {
-                                    u"ovsdb:option": u"remote_ip",
-                                    u"ovsdb:value": unicode(vswitch.get('remote-ip'))
-                                }
-                            ],
-                            u"ovsdb:name": unicode(port_name),
-                            u"ovsdb:interface-type": unicode(port_type),
-                            u"tp-id": unicode(port_name),
-                            u"vlan-tag": unicode(instance + 1),
-                            u"trunks": [
-                                {
-                                    u"trunk": u"5"
-                                }
-                            ],
-                            u"vlan-mode": u"access"
-                        }
-                    ]
-                },
+                    ovsdb_rest_url = (
+                        vswitch.get("post-url")
+                        + "%2Fbridge%2F"
+                        + bridge_name
+                        + "/port/"
+                    )
+                    body_name = "port-body"
+                port_name = port_prefix + str(instance) + "-test-" + vswitch.get("ip")
+                body = {
+                    "tp-body": {
+                        u"network-topology:termination-point": [
+                            {
+                                u"ovsdb:options": [
+                                    {
+                                        u"ovsdb:option": u"remote_ip",
+                                        u"ovsdb:value": unicode(
+                                            vswitch.get("remote-ip")
+                                        ),
+                                    }
+                                ],
+                                u"ovsdb:name": unicode(port_name),
+                                u"ovsdb:interface-type": unicode(port_type),
+                                u"tp-id": unicode(port_name),
+                                u"vlan-tag": unicode(instance + 1),
+                                u"trunks": [{u"trunk": u"5"}],
+                                u"vlan-mode": u"access",
+                            }
+                        ]
+                    },
                     # TODO add port-body
-                    'port-body': {}}
-                self.send_rest(self.session,
-                               ovsdb_rest_url + port_name,
-                               body.get(body_name))
+                    "port-body": {},
+                }
+                self.send_rest(
+                    self.session, ovsdb_rest_url + port_name, body.get(body_name)
+                )
 
         self.session.close()
 
@@ -259,12 +266,13 @@ class OvsdbConfigBlaster (object):
         """
         for vswitch_names in vswitch_lst_del_br:
             for br_num in range(num_bridges):
-                bridge_name = unicode('br-' + str(br_num) + '-test')
-                self.send_rest_del(self.session,
-                                   self.vswitch_dict[vswitch_names]
-                                   .get('post-url')
-                                   + '%2Fbridge%2F'
-                                   + bridge_name)
+                bridge_name = unicode("br-" + str(br_num) + "-test")
+                self.send_rest_del(
+                    self.session,
+                    self.vswitch_dict[vswitch_names].get("post-url")
+                    + "%2Fbridge%2F"
+                    + bridge_name,
+                )
             self.session.close()
 
     def delete_port(self, num_ports):
@@ -273,16 +281,17 @@ class OvsdbConfigBlaster (object):
             :param num_ports: Number of ports to delete
         """
         for port in range(num_ports):
-            bridge_name = 'br-0-test'
+            bridge_name = "br-0-test"
             for vswitch in self.vswitch_dict.itervalues():
                 port_prefix = "tp-"
-                ovsdb_rest_url = vswitch.get('post-url') \
-                    + '%2Fbridge%2F' \
-                    + bridge_name \
-                    + '/termination-point/'
-                port_name = port_prefix + str(port) + '-test-' + vswitch.get('ip')
-                self.send_rest_del(self.session,
-                                   ovsdb_rest_url + port_name)
+                ovsdb_rest_url = (
+                    vswitch.get("post-url")
+                    + "%2Fbridge%2F"
+                    + bridge_name
+                    + "/termination-point/"
+                )
+                port_name = port_prefix + str(port) + "-test-" + vswitch.get("ip")
+                self.send_rest_del(self.session, ovsdb_rest_url + port_name)
                 self.session.close()
 
     def send_rest_del(self, session, rest_url):
@@ -291,15 +300,12 @@ class OvsdbConfigBlaster (object):
             :param session: The HTTP session handle
             :return int: status_code - HTTP status code
         """
-        ret = session.delete(rest_url,
-                             headers=self.DELETE_HEADERS,
-                             stream=False,
-                             timeout=self.TIMEOUT)
+        ret = session.delete(
+            rest_url, headers=self.DELETE_HEADERS, stream=False, timeout=self.TIMEOUT
+        )
 
         if ret.status_code != 200:
-            raise ValueError(ret.text,
-                             ret.status_code,
-                             rest_url)
+            raise ValueError(ret.text, ret.status_code, rest_url)
         return ret.status_code
 
     def send_rest(self, session, rest_url, json_body):
@@ -310,75 +316,115 @@ class OvsdbConfigBlaster (object):
         Returns:
             :return int: status_code - HTTP status code
         """
-        ret = session.put(rest_url,
-                          json=json_body,
-                          headers=self.PUT_HEADERS,
-                          stream=False,
-                          timeout=self.TIMEOUT)
+        ret = session.put(
+            rest_url,
+            json=json_body,
+            headers=self.PUT_HEADERS,
+            stream=False,
+            timeout=self.TIMEOUT,
+        )
 
         if ret.status_code != 200:
-            raise ValueError(ret.text,
-                             ret.status_code,
-                             rest_url,
-                             json_body)
+            raise ValueError(ret.text, ret.status_code, rest_url, json_body)
         return ret.status_code
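
send_rest is a thin wrapper over requests.Session.put; a self-contained sketch of the same call shape (the URL and body below are placeholders, and the call only succeeds if a controller is actually listening):

import requests

session = requests.Session()
ret = session.put(
    "http://127.0.0.1:8181/restconf/config/example",  # placeholder URL
    json={"example": {}},  # placeholder body
    headers={"Content-Type": "application/json", "Accept": "application/json"},
    stream=False,
    timeout=10,
)
print(ret.status_code)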
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Add/delete bridge/port/term-points to OpenDaylight')
+    parser = argparse.ArgumentParser(
+        description="Add:delete bridge/port/term-points to OpenDaylight"
+    )
 
-    parser.add_argument('--mode', default='None',
-                        help='Operating mode, can be "bridge", "port" or "term" \
-                            (default is "bridge")')
-    parser.add_argument('--controller', default='127.0.0.1',
-                        help='IP of running ODL controller \
-                             (default is 127.0.0.1)')
-    parser.add_argument('--controllerport', default='8181',
-                        help='Port of ODL RESTCONF \
-                            (default is 8181)')
-    parser.add_argument('--vswitch', default='127.0.0.1',
-                        help='IP of Open vSwitch \
-                            (default is 127.0.0.1)')
-    parser.add_argument('--vswitchport', default='6640',
-                        help='Port of Open vSwitch OVSDB server \
-                            (default is 6640)')
-    parser.add_argument('--vswitchremote', default=None,
-                        help='IP of remote Open vSwitch \
-                            (default is none)')
-    parser.add_argument('--vswitchremoteport', default=None,
-                        help='Port of remote Open vSwitch OVSDB server \
-                            (default is none)')
-    parser.add_argument('--vswitchporttype', default=None,
-                        help='Type of port to create on the Open vSwitch \
-                            (default is none)')
-    parser.add_argument('--deletebridges', nargs='*', type=str, default=None,
-                        help='A list of switches on which to delete bridges, '
-                             'uses instances for number of bridges. \
+    parser.add_argument(
+        "--mode",
+        default="None",
+        help='Operating mode, can be "bridge", "port" or "term" \
+                            (default is "bridge")',
+    )
+    parser.add_argument(
+        "--controller",
+        default="127.0.0.1",
+        help="IP of running ODL controller \
+                             (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--controllerport",
+        default="8181",
+        help="Port of ODL RESTCONF \
+                            (default is 8181)",
+    )
+    parser.add_argument(
+        "--vswitch",
+        default="127.0.0.1",
+        help="IP of Open vSwitch \
+                            (default is 127.0.0.1)",
+    )
+    parser.add_argument(
+        "--vswitchport",
+        default="6640",
+        help="Port of Open vSwitch OVSDB server \
+                            (default is 6640)",
+    )
+    parser.add_argument(
+        "--vswitchremote",
+        default=None,
+        help="IP of remote Open vSwitch \
+                            (default is none)",
+    )
+    parser.add_argument(
+        "--vswitchremoteport",
+        default=None,
+        help="Port of remote Open vSwitch OVSDB server \
+                            (default is none)",
+    )
+    parser.add_argument(
+        "--vswitchporttype",
+        default=None,
+        help="Port of remote Open vSwitch OVSDB server \
+                            (default is none)",
+    )
+    parser.add_argument(
+        "--deletebridges",
+        nargs="*",
+        type=str,
+        default=None,
+        help="A list of switches on which to delete bridges, "
+        'uses instances for number of bridges. \
                               Example: "ovs-1 ovs2" \
-                            (default is none)')
-    parser.add_argument('--deleteports', type=int, default=1,
-                        help='Number of ports to delete from the remote Open vSwitch OVSDB server (default 1)')
-    parser.add_argument('--instances', type=int, default=1,
-                        help='Number of instances to add/get (default 1)')
+                            (default is none)',
+    )
+    parser.add_argument(
+        "--deleteports",
+        type=int,
+        default=1,
+        help="delete ports of remote open vswitch ovsdb server (default 1)",
+    )
+    parser.add_argument(
+        "--instances",
+        type=int,
+        default=1,
+        help="Number of instances to add/get (default 1)",
+    )
 
     args = parser.parse_args()
 
-    ovsdb_config_blaster = OvsdbConfigBlaster(args.controller,
-                                              args.controllerport,
-                                              args.vswitch,
-                                              args.vswitchport,
-                                              args.vswitchremote,
-                                              args.vswitchremoteport,
-                                              args.vswitchporttype,
-                                              args.deletebridges,
-                                              args.deleteports,
-                                              args.instances)
+    ovsdb_config_blaster = OvsdbConfigBlaster(
+        args.controller,
+        args.controllerport,
+        args.vswitch,
+        args.vswitchport,
+        args.vswitchremote,
+        args.vswitchremoteport,
+        args.vswitchporttype,
+        args.deletebridges,
+        args.deleteports,
+        args.instances,
+    )
     if args.mode == "bridge":
         if args.deletebridges is not None:
-            ovsdb_config_blaster.delete_bridge(ovsdb_config_blaster.
-                                               vswitch_lst_del_br,
-                                               ovsdb_config_blaster.
-                                               num_instances)
+            ovsdb_config_blaster.delete_bridge(
+                ovsdb_config_blaster.vswitch_lst_del_br,
+                ovsdb_config_blaster.num_instances,
+            )
         else:
             ovsdb_config_blaster.add_bridge(ovsdb_config_blaster.num_instances)
     elif args.mode == "term":
@@ -387,5 +433,7 @@ if __name__ == "__main__":
         else:
             ovsdb_config_blaster.add_port()
     else:
-        print("please use: python ovsdbconfigblaster.py --help "
-              "\nUnsupported mode: ", args.mode)
+        print(
+            "please use: python ovsdbconfigblaster.py --help " "\nUnsupported mode: ",
+            args.mode,
+        )
index 3b2d32254a773da0ef46f564ba85dc0d1e48c54c..aafed26b56982064aeab1834b9a6bba2ccd9c6a0 100644 (file)
@@ -36,6 +36,7 @@ import collections  # For deque and Counter.
 import ipaddr
 import threading
 import time
+
 try:
     from collections import Counter
 except ImportError:  # Python 2.6 does not have Counter in collections.
@@ -56,71 +57,98 @@ def str2bool(text):
 
 # Note: JSON data contains '"', so using "'" to quote Python strings.
 parser = argparse.ArgumentParser()
-parser.add_argument('--pccs', default='1', type=int,
-                    help='number of PCCs to simulate')
-parser.add_argument('--lsps', default='1', type=int,
-                    help='number of LSPs per PCC to update')
-parser.add_argument('--workers', default='1', type=int,
-                    help='number of blocking https threads to use')
-parser.add_argument('--hop', default='2.2.2.2/32',
-                    help='ipv4 prefix (including /32) of hop to use')
-parser.add_argument('--timeout', default='300', type=float,
-                    help='number of seconds after which to bail out')
-parser.add_argument('--refresh', default='0.1', type=float,
-                    help='seconds to sleep in main thread if nothing to do')
-parser.add_argument('--pccaddress', default='127.0.0.1',
-                    help='IP address of the first simulated PCC')
-parser.add_argument('--odladdress', default='127.0.0.1',
-                    help='IP address of ODL acting as PCE')
-parser.add_argument('--user', default='admin',
-                    help='Username for restconf authentication')
-parser.add_argument('--password', default='admin',
-                    help='Password for restconf authentication')
-parser.add_argument('--scope', default='sdn',
-                    help='Scope for restconf authentication')
-parser.add_argument('--reuse', default='True', type=str2bool,
-                    help='Should a single requests session be re-used')
-parser.add_argument('--delegate', default='true',
-                    help='delegate the LSP, or set "false" to leave it undelegated')
-parser.add_argument('--pccip', default=None,
-                    help='IP address of the simulated PCC')
-parser.add_argument('--tunnelnumber', default=None,
-                    help='Tunnel Number for the simulated PCC')
+parser.add_argument("--pccs", default="1", type=int, help="number of PCCs to simulate")
+parser.add_argument(
+    "--lsps", default="1", type=int, help="number of LSPs pre PCC to update"
+)
+parser.add_argument(
+    "--workers", default="1", type=int, help="number of blocking https threads to use"
+)
+parser.add_argument(
+    "--hop", default="2.2.2.2/32", help="ipv4 prefix (including /32) of hop to use"
+)
+parser.add_argument(
+    "--timeout", default="300", type=float, help="seconds to bail out after"
+)  # FIXME: grammar
+parser.add_argument(
+    "--refresh",
+    default="0.1",
+    type=float,
+    help="seconds to sleep in main thread if nothing to do",
+)
+parser.add_argument(
+    "--pccaddress", default="127.0.0.1", help="IP address of the first simulated PCC"
+)
+parser.add_argument(
+    "--odladdress", default="127.0.0.1", help="IP address of ODL acting as PCE"
+)
+parser.add_argument(
+    "--user", default="admin", help="Username for restconf authentication"
+)
+parser.add_argument(
+    "--password", default="admin", help="Password for restconf authentication"
+)
+parser.add_argument("--scope", default="sdn", help="Scope for restconf authentication")
+parser.add_argument(
+    "--reuse",
+    default="True",
+    type=str2bool,
+    help="Should single requests session be re-used",
+)
+parser.add_argument(
+    "--delegate",
+    default="true",
+    help='delegate the LSP, or set "false" to leave it undelegated',
+)
+parser.add_argument("--pccip", default=None, help="IP address of the simulated PCC")
+parser.add_argument(
+    "--tunnelnumber", default=None, help="Tunnel Number for the simulated PCC"
+)
 args = parser.parse_args()  # arguments are read
 
-expected = '''{"output":{}}'''
+expected = """{"output":{}}"""
 
 payload_list_data = [
-    '{',
+    "{",
     '   "input":{',
-    '       "node":"pcc://', '', '",',
-    '       "name":"pcc_', '', '_tunnel_', '', '",',
+    '       "node":"pcc://',
+    "",
+    '",',
+    '       "name":"pcc_',
+    "",
+    "_tunnel_",
+    "",
+    '",',
     '       "network-topology-ref":"/network-topology:network-topology/network-topology:topology',
-    '[network-topology:topology-id=\\\"pcep-topology\\\"]",',
+    '[network-topology:topology-id=\\"pcep-topology\\"]",',
     '       "arguments":{',
     '           "lsp":{',
-    '           "delegate":', '',
+    '           "delegate":',
+    "",
     '           ,"administrative":true',
-    '},',
+    "},",
     '"ero":{',
     '   "subobject":[',
-    '       {',
+    "       {",
     '           "loose":false,',
     '           "ip-prefix":{',
-    '                "ip-prefix":', '"', '', '"',
-    '           }',
-    '       },',
-    '       {',
+    '                "ip-prefix":',
+    '"',
+    "",
+    '"',
+    "           }",
+    "       },",
+    "       {",
     '           "loose":false,',
     '           "ip-prefix":{',
     '                "ip-prefix":"1.1.1.1/32"',
-    '            }',
-    '        }',
-    '        ]',
-    '       }',
-    '   }',
-    ' }',
-    '}'
+    "            }",
+    "        }",
+    "        ]",
+    "       }",
+    "   }",
+    " }",
+    "}",
 ]
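
payload_list_data is a list of JSON fragments with empty-string placeholder slots; the generators below overwrite selected slots (indices 6, 15 and 25 are the ones visible in the following hunks) and join the list into a single request body. A toy version of the same fill-and-join idea:

template = ['{"name":"pcc_', "", '_tunnel_', "", '"}']
template[1] = "10.0.0.1"  # placeholder PCC address
template[3] = "7"  # placeholder tunnel number
print("".join(template))  # {"name":"pcc_10.0.0.1_tunnel_7"}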
 
 
@@ -140,7 +168,7 @@ def iterable_msg(pccs, lsps, workers, hop, delegate):
     """Generator yielding tuple of worker number and kwargs to post."""
     first_pcc_int = int(ipaddr.IPv4Address(args.pccaddress))
     # Headers are constant, but it is easier to add them to kwargs in this generator.
-    headers = {'Content-Type': 'application/json'}
+    headers = {"Content-Type": "application/json"}
     # TODO: Perhaps external text file with Template? May affect performance.
     list_data = payload_list_data
     for lsp in range(1, lsps + 1):
@@ -152,7 +180,7 @@ def iterable_msg(pccs, lsps, workers, hop, delegate):
             list_data[6] = pcc_ip
             list_data[15] = delegate
             list_data[25] = hop
-            whole_data = ''.join(list_data)
+            whole_data = "".join(list_data)
             worker = (lsp * pccs + pcc) % workers
             post_kwargs = {"data": whole_data, "headers": headers}
             yield worker, post_kwargs
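
The expression (lsp * pccs + pcc) % workers spreads the generated messages across the worker threads round-robin; with toy numbers:

pccs, lsps, workers = 2, 2, 3
assignments = [
    (lsp * pccs + pcc) % workers
    for lsp in range(1, lsps + 1)
    for pcc in range(pccs)
]
print(assignments)  # [2, 0, 1, 2]
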
@@ -162,7 +190,7 @@ def generate_payload_for_single_pcc(hop, delegate, pccip, tunnel_no):
     """Generator yielding single kwargs to post."""
     first_pcc_int = int(ipaddr.IPv4Address(args.pccaddress))
     # Headers are constant, but it is easier to add them to kwargs in this generator.
-    headers = {'Content-Type': 'application/json'}
+    headers = {"Content-Type": "application/json"}
     # TODO: Perhaps external text file with Template? May affect performance.
     list_data = payload_list_data
     if tunnel_no == "None":
@@ -178,7 +206,7 @@ def generate_payload_for_single_pcc(hop, delegate, pccip, tunnel_no):
     list_data[6] = pcc_ip
     list_data[15] = delegate
     list_data[25] = hop
-    whole_data = ''.join(list_data)
+    whole_data = "".join(list_data)
     worker = 0
     post_kwargs = {"data": whole_data, "headers": headers}
     print(post_kwargs)
@@ -187,7 +215,7 @@ def generate_payload_for_single_pcc(hop, delegate, pccip, tunnel_no):
 
 def queued_send(session, queue_messages, queue_responses):
     """Pop from queue, Post and append result; repeat until empty."""
-    uri = 'operations/network-topology-pcep:update-lsp'
+    uri = "operations/network-topology-pcep:update-lsp"
     while 1:
         try:
             post_kwargs = queue_messages.popleft()
@@ -205,28 +233,34 @@ def queued_send(session, queue_messages, queue_responses):
 
 def classify(resp_tuple):
     """Return 'pass' or a reason what is wrong with response."""
-    prepend = ''
+    prepend = ""
     status = resp_tuple[0]
     if (status != 200) and (status != 204):  # is it int?
-        prepend = 'status: ' + str(status) + ' '
+        prepend = "status: " + str(status) + " "
     content = resp_tuple[1]
-    if prepend or (content != expected and content != ''):
-        return prepend + 'content: ' + str(content)
-    return 'pass'
+    if prepend or (content != expected and content != ""):
+        return prepend + "content: " + str(content)
+    return "pass"
 
 
 # Main.
 list_q_msg = [collections.deque() for _ in range(args.workers)]
 if args.pccip == "None":
-    for worker, post_kwargs in iterable_msg(args.pccs, args.lsps, args.workers, args.hop, args.delegate):
+    for worker, post_kwargs in iterable_msg(
+        args.pccs, args.lsps, args.workers, args.hop, args.delegate
+    ):
         list_q_msg[worker].append(post_kwargs)
 else:
-    for worker, post_kwargs in generate_payload_for_single_pcc(args.hop, args.delegate, args.pccip, args.tunnelnumber):
+    for worker, post_kwargs in generate_payload_for_single_pcc(
+        args.hop, args.delegate, args.pccip, args.tunnelnumber
+    ):
         list_q_msg[worker].append(post_kwargs)
 queue_responses = collections.deque()  # thread safe
 threads = []
 for worker in range(args.workers):
-    session = AuthStandalone.Init_Session(args.odladdress, args.user, args.password, args.scope, args.reuse)
+    session = AuthStandalone.Init_Session(
+        args.odladdress, args.user, args.password, args.scope, args.reuse
+    )
     queue_messages = list_q_msg[worker]
     thread_args = (session, queue_messages, queue_responses)
     thread = threading.Thread(target=queued_send, args=thread_args)
@@ -234,7 +268,7 @@ for worker in range(args.workers):
     threads.append(thread)
 tasks = sum(map(len, list_q_msg))  # fancy way of counting, should equal pccs*lsps.
 counter = CounterDown(tasks)
-print('work is going to start with %s tasks' % tasks)
+print("work is going to start with %s tasks" % tasks)
 time_start = time.time()
 for thread in threads:
     thread.start()
@@ -263,9 +297,9 @@ while 1:
             continue
         left = len(queue_responses)
         if left:
-            print('error: more responses left in queue', left)
+            print("error: more responses left inqueue", left)
     else:
-        print('Time is up!')
+        print("Time is up!")
         left = len(queue_responses)  # can be still increasing
         for _ in range(left):
             resp_tuple = queue_responses.popleft()  # thread safe
@@ -274,7 +308,7 @@ while 1:
     break  # may leave late items in queue_responses
 time_stop = time.time()
 timedelta_duration = time_stop - time_start
-print('took', timedelta_duration)
+print("took", timedelta_duration)
 print(repr(counter.counter))
 # for message in debug_list:
 #     print message
index 30586783753c4b467cd363a0303f98d3ada6e8dd..92bda9305a7d053aa71101473d47f0cd9609383d 100755 (executable)
@@ -67,18 +67,22 @@ class Stats(object):
                     self.min_flows_col.append(float(row[self.min_flow_index]))
                     self.max_flows_col.append(float(row[self.max_flow_index]))
                     self.avg_flows_col.append(float(row[self.avg_flow_index]))
-                    self.runtime_col.append(float(row[self.end_time_index]) -
-                                            float(row[self.start_time_index]))
+                    self.runtime_col.append(
+                        float(row[self.end_time_index])
+                        - float(row[self.start_time_index])
+                    )
                     self.used_ram_col.append(float(row[self.used_ram_index]))
-                    self.iowait_col.append(float(row[self.end_iowait_index]) -
-                                           float(row[self.start_iowait_index]))
+                    self.iowait_col.append(
+                        float(row[self.end_iowait_index])
+                        - float(row[self.start_iowait_index])
+                    )
                     self.steal_time_col.append(
-                        float(row[self.end_steal_time_index]) -
-                        float(row[self.start_steal_time_index]))
+                        float(row[self.end_steal_time_index])
+                        - float(row[self.start_steal_time_index])
+                    )
                     self.one_load_col.append(float(row[self.one_load_index]))
                     self.five_load_col.append(float(row[self.five_load_index]))
-                    self.fifteen_load_col.append(
-                        float(row[self.fifteen_load_index]))
+                    self.fifteen_load_col.append(float(row[self.fifteen_load_index]))
                 except ValueError:
                     # Skips header
                     continue
@@ -96,8 +100,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Average Flows per Second", self.avg_flows_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Average Flows per Second", self.avg_flows_col
+        )
 
     def compute_min_flow_stats(self):
         """Compute CBench min flows/second stats."""
@@ -112,8 +117,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Minimum Flows per Second", self.min_flows_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Minimum Flows per Second", self.min_flows_col
+        )
 
     def compute_max_flow_stats(self):
         """Compute CBench max flows/second stats."""
@@ -128,8 +134,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Maximum Flows per Second", self.max_flows_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Maximum Flows per Second", self.max_flows_col
+        )
 
     def compute_ram_stats(self):
         """Compute used RAM stats."""
@@ -144,8 +151,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Used RAM (MB)", self.used_ram_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Used RAM (MB)", self.used_ram_col
+        )
 
     def compute_runtime_stats(self):
         """Compute CBench runtime length stats."""
@@ -160,8 +168,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "CBench Runtime (sec)", self.runtime_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "CBench Runtime (sec)", self.runtime_col
+        )
 
     def compute_iowait_stats(self):
         """Compute iowait stats."""
@@ -176,8 +185,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "IOWait Time (sec)", self.iowait_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "IOWait Time (sec)", self.iowait_col
+        )
 
     def compute_steal_time_stats(self):
         """Compute steal time stats."""
@@ -192,8 +202,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Steal Time (sec)", self.steal_time_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Steal Time (sec)", self.steal_time_col
+        )
 
     def compute_one_load_stats(self):
         """Compute one minute load stats."""
@@ -208,8 +219,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "One Minute Load", self.one_load_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "One Minute Load", self.one_load_col
+        )
 
     def compute_five_load_stats(self):
         """Compute five minute load stats."""
@@ -224,8 +236,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Five Minute Load", self.five_load_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Five Minute Load", self.five_load_col
+        )
 
     def compute_fifteen_load_stats(self):
         """Compute fifteen minute load stats."""
@@ -240,8 +253,9 @@ class Stats(object):
         :type graph_num: int
 
         """
-        self.build_generic_graph(total_gcount, graph_num,
-                                 "Fifteen Minute Load", self.fifteen_load_col)
+        self.build_generic_graph(
+            total_gcount, graph_num, "Fifteen Minute Load", self.fifteen_load_col
+        )
 
     def compute_generic_stats(self, stats_name, stats_col):
         """Helper for computing generic stats."""
@@ -251,11 +265,11 @@ class Stats(object):
         generic_stats["mean"] = round(numpy.mean(stats_col), self.precision)
         generic_stats["stddev"] = round(numpy.std(stats_col), self.precision)
         try:
-            generic_stats["relstddev"] = round(generic_stats["stddev"] /
-                                               generic_stats["mean"] *
-                                               100, self.precision)
+            generic_stats["relstddev"] = round(
+                generic_stats["stddev"] / generic_stats["mean"] * 100, self.precision
+            )
         except ZeroDivisionError:
-            generic_stats["relstddev"] = 0.
+            generic_stats["relstddev"] = 0.0
         self.results[stats_name] = generic_stats
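
The "relstddev" computed above is the relative standard deviation (stddev over mean, in percent); a quick numeric check with numpy on made-up sample values:

import numpy

col = [10.0, 12.0, 14.0]
mean = numpy.mean(col)  # 12.0
stddev = numpy.std(col)  # population standard deviation, ~1.633
print(round(stddev / mean * 100, 3))  # 13.608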
 
     def build_generic_graph(self, total_gcount, graph_num, y_label, data_col):
@@ -283,37 +297,45 @@ class Stats(object):
 stats = Stats()
 
 # Map of graph names to the Stats.fns that build them
-graph_map = {"min_flows": stats.build_min_flow_graph,
-             "max_flows": stats.build_max_flow_graph,
-             "flows": stats.build_avg_flow_graph,
-             "runtime": stats.build_runtime_graph,
-             "iowait": stats.build_iowait_graph,
-             "steal_time": stats.build_steal_time_graph,
-             "one_load": stats.build_one_load_graph,
-             "five_load": stats.build_five_load_graph,
-             "fifteen_load": stats.build_fifteen_load_graph,
-             "ram": stats.build_ram_graph}
-stats_map = {"min_flows": stats.compute_min_flow_stats,
-             "max_flows": stats.compute_max_flow_stats,
-             "flows": stats.compute_avg_flow_stats,
-             "runtime": stats.compute_runtime_stats,
-             "iowait": stats.compute_iowait_stats,
-             "steal_time": stats.compute_steal_time_stats,
-             "one_load": stats.compute_one_load_stats,
-             "five_load": stats.compute_five_load_stats,
-             "fifteen_load": stats.compute_fifteen_load_stats,
-             "ram": stats.compute_ram_stats}
+graph_map = {
+    "min_flows": stats.build_min_flow_graph,
+    "max_flows": stats.build_max_flow_graph,
+    "flows": stats.build_avg_flow_graph,
+    "runtime": stats.build_runtime_graph,
+    "iowait": stats.build_iowait_graph,
+    "steal_time": stats.build_steal_time_graph,
+    "one_load": stats.build_one_load_graph,
+    "five_load": stats.build_five_load_graph,
+    "fifteen_load": stats.build_fifteen_load_graph,
+    "ram": stats.build_ram_graph,
+}
+stats_map = {
+    "min_flows": stats.compute_min_flow_stats,
+    "max_flows": stats.compute_max_flow_stats,
+    "flows": stats.compute_avg_flow_stats,
+    "runtime": stats.compute_runtime_stats,
+    "iowait": stats.compute_iowait_stats,
+    "steal_time": stats.compute_steal_time_stats,
+    "one_load": stats.compute_one_load_stats,
+    "five_load": stats.compute_five_load_stats,
+    "fifteen_load": stats.compute_fifteen_load_stats,
+    "ram": stats.compute_ram_stats,
+}
 
 # Build argument parser
 parser = argparse.ArgumentParser(description="Compute stats about CBench data")
-parser.add_argument("-S", "--all-stats", action="store_true",
-                    help="compute all stats")
-parser.add_argument("-s", "--stats", choices=stats_map.keys(),
-                    help="compute stats on specified data", nargs="+")
-parser.add_argument("-G", "--all-graphs", action="store_true",
-                    help="graph all data")
-parser.add_argument("-g", "--graphs", choices=graph_map.keys(),
-                    help="graph specified data", nargs="+")
+parser.add_argument("-S", "--all-stats", action="store_true", help="compute all stats")
+parser.add_argument(
+    "-s",
+    "--stats",
+    choices=stats_map.keys(),
+    help="compute stats on specified data",
+    nargs="+",
+)
+parser.add_argument("-G", "--all-graphs", action="store_true", help="graph all data")
+parser.add_argument(
+    "-g", "--graphs", choices=graph_map.keys(), help="graph specified data", nargs="+"
+)
 
 
 # Print help if no arguments are given
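graph_map and stats_map are dispatch tables: the CLI names key directly into bound methods, so adding a metric means adding one entry per dict rather than another if/elif chain. A hedged sketch of the driver loop that presumably consumes them together with the parser above (the actual wiring sits outside this hunk):

args = parser.parse_args(["-s", "flows", "runtime"])  # hypothetical argv
stats_to_compute = stats_map.keys() if args.all_stats else (args.stats or [])
for stat in stats_to_compute:
    stats_map[stat]()  # look up the bound no-arg method by name and call it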
@@ -348,13 +370,13 @@ for stat in stats_to_compute:
 if args.graphs or args.all_graphs:
     # Attempt to adjust plot spacing, just a simple heuristic
     if len(graphs_to_build) <= 3:
-        pyplot.subplots_adjust(hspace=.2)
+        pyplot.subplots_adjust(hspace=0.2)
     elif len(graphs_to_build) <= 6:
-        pyplot.subplots_adjust(hspace=.4)
+        pyplot.subplots_adjust(hspace=0.4)
     elif len(graphs_to_build) <= 9:
-        pyplot.subplots_adjust(hspace=.7)
+        pyplot.subplots_adjust(hspace=0.7)
     else:
-        pyplot.subplots_adjust(hspace=.7)
+        pyplot.subplots_adjust(hspace=0.7)
         print("WARNING: That's a lot of graphs. Add a second column?")
     pyplot.show()
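hspace in subplots_adjust is the vertical gap between stacked subplots, expressed as a fraction of the average axes height, which is why more rows get a larger fraction in the heuristic above. A self-contained illustration of the middle (<= 6 graphs) branch, assuming only that matplotlib is installed as this script already requires:

from matplotlib import pyplot

for graph_num in range(4):
    pyplot.subplot(4, 1, graph_num + 1)  # 4 rows, 1 column
    pyplot.plot([0, 1], [0, graph_num])
pyplot.subplots_adjust(hspace=0.4)  # the <= 6 branch above
pyplot.show()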
 
index d65b13b3d68b3eaadbfe68ed0ed533d4ab83329b..389a88aac6827743ece98401693313fe90441177 100644
@@ -27,11 +27,23 @@ def parse_arguments():
     """
     parser = argparse.ArgumentParser()
     parser.add_argument("--uri", default="ws://127.0.0.1:8185/", help="URI to connect")
-    parser.add_argument("--count", type=int, default=1, help="Number of messages to receive")
-    parser.add_argument("--credentials", default="admin:admin", help="Basic authorization username:password")
+    parser.add_argument(
+        "--count", type=int, default=1, help="Number of messages to receive"
+    )
+    parser.add_argument(
+        "--credentials",
+        default="admin:admin",
+        help="Basic authorization username:password",
+    )
     parser.add_argument("--logfile", default="wsreceiver.log", help="Log file name")
-    parser.add_argument("--debug", dest="loglevel", action="store_const",
-                        const=logging.DEBUG, default=logging.INFO, help="Log level")
+    parser.add_argument(
+        "--debug",
+        dest="loglevel",
+        action="store_const",
+        const=logging.DEBUG,
+        default=logging.INFO,
+        help="Log level",
+    )
     args = parser.parse_args()
     return args
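Given the flags defined above, a typical run of this receiver might look like the following; the script name and the logging setup are assumptions, not shown in this hunk, and the snippet relies on this module's own "import logging":

#   python wsreceiver.py --count 5 --credentials admin:admin --debug
args = parse_arguments()
logging.basicConfig(filename=args.logfile, level=args.loglevel)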
 
@@ -50,10 +62,8 @@ class WSReceiver(object):
         """
         self.uri = uri
         self.credentials = credentials
-        auth_string = base64.b64encode(credentials.encode('ascii'))
-        self.headers = {
-            'Authorization': 'Basic ' + auth_string.decode('ascii')
-        }
+        auth_string = base64.b64encode(credentials.encode("ascii"))
+        self.headers = {"Authorization": "Basic " + auth_string.decode("ascii")}
 
         logger.info("Connecting to: %s", self.uri)
         self.ws = create_connection(self.uri, header=self.headers)
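The two rewritten lines build a standard HTTP Basic Authorization header: base64-encode "user:password" and prefix it with "Basic ". A self-contained check using the "admin:admin" default from the parser above:

import base64

auth_string = base64.b64encode("admin:admin".encode("ascii"))
headers = {"Authorization": "Basic " + auth_string.decode("ascii")}
assert headers == {"Authorization": "Basic YWRtaW46YWRtaW4="}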