Performance scripts modifications: added 2 scripts (flow_config_blaster.py, flow_add_delete_test.py) and modified 2 scripts (config_cleanup.py, inventory_crawler.py).
author    Jan Medved <jmedved@cisco.com>
Sun, 5 Oct 2014 17:00:27 +0000 (10:00 -0700)
committer Jan Medved <jmedved@cisco.com>
Sun, 5 Oct 2014 17:08:25 +0000 (10:08 -0700)
Change-Id: I32842be18c1334542b1b48a87a02db2309f1a67e
Signed-off-by: jmedved@cisco.com <jmedved@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py [new file with mode: 0755]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py [new file with mode: 0755]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
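Taken together, the scripts form a small flow-programming benchmark kit: flow_config_blaster.py adds and deletes flows over RESTCONF, inventory_crawler.py counts the flows the controller actually reports, config_cleanup.py wipes the config inventory, and flow_add_delete_test.py drives the other three. As a rough sketch (not part of the patch; assumes a controller on 127.0.0.1:8181 with no auth and a caller-supplied JSON flow template), the modules compose like this:

    from flow_config_blaster import FlowConfigBlaster
    from inventory_crawler import InventoryCrawler
    from config_cleanup import cleanup_config

    TEMPLATE = '...'  # JSON flow body with the same %-placeholders as JSON_FLOW_MOD1 below

    fct = FlowConfigBlaster('127.0.0.1', '8181', 1, 1, 16, 10, 0, False, TEMPLATE)
    fct.add_blaster()                             # 1 cycle x 1 thread x 10 flows

    ic = InventoryCrawler('127.0.0.1', '8181', 0, 'operational', False, False)
    ic.crawl_inventory()                          # count the flows the controller reports
    print ic.found_flows

    cleanup_config('127.0.0.1', '8181', False)    # bulk-delete the config inventory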

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
index f48ebc035ec064beabcce9e4a7e95ba080c955d1..0002b23f27a1da92b0b7cdd1ba5dae8f095dc372 100644
@@ -1,32 +1,45 @@
+#!/usr/bin/python
 __author__ = "Jan Medved"
 __copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
 __license__ = "New-style BSD"
 __email__ = "jmedved@cisco.com"
 
-
-import json
 import argparse
 import requests
 
-CONFIGURL = 'restconf/config/opendaylight-inventory:nodes'
-getheaders = {'Accept': 'application/json'}
-
 
-if __name__ == "__main__":
+def cleanup_config(host, port, auth):
+    CONFIGURL = 'restconf/config/opendaylight-inventory:nodes'
+    getheaders = {'Accept': 'application/json'}
 
-    parser = argparse.ArgumentParser(description='Cleans up the config space')
-    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
-                        'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--odlport', default='8080', help='port on '
-                        'which odl\'s RESTCONF is listening (default is 8080)')
+    url = 'http://' + host + ":" + port + '/' + CONFIGURL
+    s = requests.Session()
 
-    in_args = parser.parse_args()
+    if not auth:
+        r = s.delete(url, headers=getheaders)
+    else:
+        r = s.delete(url, headers=getheaders, auth=('admin', 'admin'))
 
-    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + CONFIGURL
-    s = requests.Session()
-    r = s.delete(url, headers=getheaders)
+    s.close()
 
     if r.status_code != 200:
         print 'Failed to delete nodes in the config space, code %d' % r.status_code
     else:
         print 'Nodes in config space deleted.'
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='Cleans up the config space')
+    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
+                        'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8181', help='port on '
+                        'which odl\'s RESTCONF is listening (default is 8181)')
+    parser.add_argument('--no-auth', dest='auth', action='store_false', default=False,
+                        help="Do not use authenticated access to REST (default)")
+    parser.add_argument('--auth', dest='auth', action='store_true',
+                        help="Use authenticated access to REST "
+                        "(username: 'admin', password: 'admin').")
+
+    in_args = parser.parse_args()
+    cleanup_config(in_args.odlhost, in_args.odlport, in_args.auth)
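The change above moves the DELETE logic out of __main__ and into cleanup_config(host, port, auth), so other scripts can import it; flow_add_delete_test.py below uses it for its bulk-delete path. A one-line authenticated cleanup from another script would look like:

    from config_cleanup import cleanup_config
    cleanup_config('127.0.0.1', '8181', True)  # auth=True uses admin/admin, per the function above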
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_add_delete_test.py
new file mode 100755
index 0000000..1bb13d6
--- /dev/null
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+import argparse
+import time
+from flow_config_blaster import FlowConfigBlaster
+from inventory_crawler import InventoryCrawler
+from config_cleanup import cleanup_config
+
+
+if __name__ == "__main__":
+
+    JSON_FLOW_MOD1 = '''{
+        "flow-node-inventory:flow": [
+            {
+                "flow-node-inventory:cookie": %d,
+                "flow-node-inventory:cookie_mask": 65535,
+                "flow-node-inventory:flow-name": "%s",
+                "flow-node-inventory:hard-timeout": %d,
+                "flow-node-inventory:id": "%s",
+                "flow-node-inventory:idle-timeout": %d,
+                "flow-node-inventory:installHw": false,
+                "flow-node-inventory:instructions": {
+                    "flow-node-inventory:instruction": [
+                        {
+                            "flow-node-inventory:apply-actions": {
+                                "flow-node-inventory:action": [
+                                    {
+                                        "flow-node-inventory:drop-action": {},
+                                        "flow-node-inventory:order": 0
+                                    }
+                                ]
+                            },
+                            "flow-node-inventory:order": 0
+                        }
+                    ]
+                },
+                "flow-node-inventory:match": {
+                    "flow-node-inventory:ipv4-destination": "%s/32",
+                    "flow-node-inventory:ethernet-match": {
+                        "flow-node-inventory:ethernet-type": {
+                            "flow-node-inventory:type": 2048
+                        }
+                    }
+                },
+                "flow-node-inventory:priority": 2,
+                "flow-node-inventory:strict": false,
+                "flow-node-inventory:table_id": 0
+            }
+        ]
+    }'''
+
+
+    parser = argparse.ArgumentParser(description='Flow programming performance test: first adds flows to the config '
+                                                 'tree and then deletes them, as specified by optional parameters.')
+
+    parser.add_argument('--host', default='127.0.0.1',
+                        help='Host where odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--port', default='8181',
+                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
+    parser.add_argument('--flows', type=int, default=10,
+                        help='Number of flow add/delete requests to send in each cycle; default 10')
+    parser.add_argument('--cycles', type=int, default=1,
+                        help='Number of flow add/delete cycles to send in each thread; default 1')
+    parser.add_argument('--threads', type=int, default=1,
+                        help='Number of request worker threads; default=1. '
+                             'Each thread will add/delete <flows> flows.')
+    parser.add_argument('--nodes', type=int, default=16,
+                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
+                             'flows will be evenly distributed (programmed) into connected nodes.')
+    parser.add_argument('--delay', type=int, default=2,
+                        help='Time to wait between the add and delete cycles; default=2')
+    parser.add_argument('--timeout', type=int, default=100,
+                        help='The maximum time to wait for stats to catch up after the add/delete cycles; default=100')
+    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
+                        help='Delete all added flows one by one, benchmark delete '
+                             'performance.')
+    parser.add_argument('--bulk-delete', dest='bulk_delete', action='store_true', default=False,
+                        help='Delete all flows in bulk; default=False')
+    parser.add_argument('--auth', dest='auth', action='store_true',
+                        help="Use authenticated access to REST (username: 'admin', password: 'admin'); default=False")
+    parser.add_argument('--startflow', type=int, default=0,
+                        help='The starting Flow ID; default=0')
+
+    in_args = parser.parse_args()
+
+    # Initialize
+    ic = InventoryCrawler(in_args.host, in_args.port, 0, 'operational', in_args.auth, False)
+
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
+                            in_args.flows, in_args.startflow, in_args.auth, JSON_FLOW_MOD1)
+
+    # Get baseline stats
+    ic.crawl_inventory()
+    reported = ic.reported_flows
+    found = ic.found_flows
+
+    print 'Baseline:'
+    print '   Reported flows: %d' % reported
+    print '   Found flows:    %d' % found
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+    fct.add_blaster()
+
+    print '\n*** Total flows added: %s' % fct.get_total_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+
+    # Wait for stats to catch up
+    total_delay = 0
+    exp_found = found + fct.get_ok_flows()
+    exp_reported = reported + fct.get_ok_flows()
+
+    print 'Waiting for stats to catch up:'
+    while True:
+        ic.crawl_inventory()
+        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
+        if ic.found_flows == exp_found or total_delay > in_args.timeout:
+            break
+        total_delay += in_args.delay
+        time.sleep(in_args.delay)
+
+    if total_delay < in_args.timeout:
+        print 'Stats collected in %d seconds.' % total_delay
+    else:
+        print 'Stats collection did not finish in %d seconds; giving up.' % total_delay
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
+    # deleted in each thread
+    if in_args.bulk_delete:
+        print '\nDeleting all flows in bulk:\n   ',
+        cleanup_config(in_args.host, in_args.port, in_args.auth)
+    else:
+        print '\nDeleting flows one by one\n   ',
+        fct.delete_blaster()
+
+    # Wait for stats to catch up
+    total_delay = 0
+
+    print '\nWaiting for stats to catch up:'
+    while True:
+        ic.crawl_inventory()
+        if ic.found_flows == found or total_delay > in_args.timeout:
+            break
+        total_delay += in_args.delay
+        print '   %d, %d' % (ic.reported_flows, ic.found_flows)
+        time.sleep(in_args.delay)
+
+    if total_delay < in_args.timeout:
+        print 'Stats collected in %d seconds.' % total_delay
+    else:
+        print 'Stats collection did not finish in %d seconds; giving up.' % total_delay
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_blaster.py
new file mode 100755
index 0000000..54d2c6f
--- /dev/null
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+from random import randrange
+import json
+import argparse
+import requests
+import time
+import threading
+import re
+import netaddr
+
+
+class Counter(object):
+    def __init__(self, start=0):
+        self.lock = threading.Lock()
+        self.value = start
+
+    def increment(self, value=1):
+        self.lock.acquire()
+        val = self.value
+        try:
+            self.value += value
+        finally:
+            self.lock.release()
+        return val
+
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print ("elapsed time: %f ms" % self.msecs)
+
+
+class FlowConfigBlaster(object):
+    putheaders = {'content-type': 'application/json'}
+    getheaders = {'Accept': 'application/json'}
+
+    FLWURL = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+    INVURL = 'restconf/operational/opendaylight-inventory:nodes'
+
+    ok_total = 0
+
+    flows = {}
+
+    def __init__(self, host, port, ncycles, nthreads, nnodes, nflows, startflow, auth, json_template):
+        self.host = host
+        self.port = port
+        self.ncycles = ncycles
+        self.nthreads = nthreads
+        self.nnodes = nnodes
+        self.nflows = nflows
+        self.startflow = startflow
+        self.auth = auth
+        self.json_template = json_template
+
+        self.ok_rate = Counter(0.0)
+        self.total_rate = Counter(0.0)
+
+        self.ip_addr = Counter(int(netaddr.IPAddress('10.0.0.1')) + startflow)
+
+        self.print_lock = threading.Lock()
+        self.cond = threading.Condition()
+        self.threads_done = 0
+
+        for i in range(self.nthreads):
+            self.flows[i] = {}
+
+
+    def get_num_nodes(self, session):
+        """
+        Determines the number of OF nodes in the connected mininet network. If mininet is not connected, the
+        number of nodes given by the --nodes parameter (default 16) is used.
+        """
+        inventory_url = 'http://' + self.host + ":" + self.port + '/' + self.INVURL
+        nodes = self.nnodes
+
+        if not self.auth:
+            r = session.get(inventory_url, headers=self.getheaders, stream=False)
+        else:
+            r = session.get(inventory_url, headers=self.getheaders, stream=False, auth=('admin', 'admin'))
+
+        if r.status_code == 200:
+            try:
+                inv = json.loads(r.content)['nodes']['node']
+                nn = 0
+                for n in range(len(inv)):
+                    if re.search('openflow', inv[n]['id']) is not None:
+                        nn += 1
+                if nn != 0:
+                    nodes = nn
+            except KeyError:
+                pass
+
+        return nodes
+
+
+    def add_flow(self, session, url_template, tid, node, flow_id, ipaddr):
+        """
+        Adds a single flow to the config data store via REST
+        """
+        flow_data = self.json_template % (tid + flow_id, 'TestFlow-%d' % flow_id, 65000,
+                                          str(flow_id), 65000, str(netaddr.IPAddress(ipaddr)))
+        # print flow_data
+        flow_url = url_template % (node, flow_id)
+        # print flow_url
+
+        if not self.auth:
+            r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False)
+        else:
+            r = session.put(flow_url, data=flow_data, headers=self.putheaders, stream=False, auth=('admin', 'admin'))
+
+        return r.status_code
+
+
+    def add_flows(self, start_flow, tid):
+        """
+        Adds flows into the ODL config space. This function is executed by a worker thread
+        """
+
+        put_url = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
+
+        add_res = {200: 0}
+
+        s = requests.Session()
+
+        n_nodes = self.get_num_nodes(s)
+
+        with self.print_lock:
+            print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+
+        with Timer() as t:
+            for flow in range(self.nflows):
+                node_id = randrange(1, n_nodes + 1)
+                flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
+                self.flows[tid][flow_id] = node_id
+                sts = self.add_flow(s, put_url, tid, node_id, flow_id, self.ip_addr.increment())
+                try:
+                    add_res[sts] += 1
+                except KeyError:
+                    add_res[sts] = 1
+
+        add_time = t.secs
+        add_ok_rate = add_res[200] / add_time
+        add_total_rate = sum(add_res.values()) / add_time
+
+        self.ok_rate.increment(add_ok_rate)
+        self.total_rate.increment(add_total_rate)
+
+        with self.print_lock:
+            print '    Thread %d: ' % tid
+            print '        Add time: %.2f,' % add_time
+            print '        Add success rate:  %.2f, Add total rate: %.2f' % (add_ok_rate, add_total_rate)
+            print '        Add Results: ',
+            print add_res
+            self.ok_total += add_res[200]
+
+        s.close()
+
+        # Update the done-count and notify under the condition lock so run_cycle() cannot miss the wakeup
+        with self.cond:
+            self.threads_done += 1
+            self.cond.notifyAll()
+
+
+    def delete_flow(self, session, url_template, node, flow_id):
+        flow_url = url_template % (node, flow_id)
+
+        if not self.auth:
+            r = session.delete(flow_url, headers=self.getheaders)
+        else:
+            r = session.delete(flow_url, headers=self.getheaders, auth=('admin', 'admin'))
+
+        return r.status_code
+
+
+    def delete_flows(self, start_flow, tid):
+        """
+        Deletes flows from the ODL config space that have been added using the 'add_flows()' function. This function
+        is executed by a worker thread.
+        """
+        del_url = 'http://' + self.host + ":" + self.port + '/' + self.FLWURL
+
+        del_res = {200: 0}
+
+        s = requests.Session()
+        n_nodes = self.get_num_nodes(s)
+
+        with self.print_lock:
+            print 'Thread %d: Deleting %d flows on %d nodes' % (tid, self.nflows, n_nodes)
+
+        with Timer() as t:
+            for flow in range(self.nflows):
+                flow_id = tid * (self.ncycles * self.nflows) + flow + start_flow + self.startflow
+                sts = self.delete_flow(s, del_url, self.flows[tid][flow_id], flow_id)
+                try:
+                    del_res[sts] += 1
+                except KeyError:
+                    del_res[sts] = 1
+
+        del_time = t.secs
+
+        del_ok_rate = del_res[200] / del_time
+        del_total_rate = sum(del_res.values()) / del_time
+
+        self.ok_rate.increment(del_ok_rate)
+        self.total_rate.increment(del_total_rate)
+
+        with self.print_lock:
+            print '    Thread %d: ' % tid
+            print '        Delete time: %.2f,' % del_time
+            print '        Delete success rate:  %.2f, Delete total rate: %.2f' % (del_ok_rate, del_total_rate)
+            print '        Delete Results: ',
+            print del_res
+
+        s.close()
+
+        # As in add_flows(), update the done-count and notify under the condition lock
+        with self.cond:
+            self.threads_done += 1
+            self.cond.notifyAll()
+
+
+    def run_cycle(self, function):
+        """
+        Runs an add or delete cycle: starts the configured number of worker threads, each of which adds or
+        deletes its share of the flows. All work is done in the context of the worker threads.
+        """
+
+        for c in range(self.ncycles):
+            with self.print_lock:
+                print '\nCycle %d:' % c
+
+            threads = []
+            for i in range(self.nthreads):
+                t = threading.Thread(target=function, args=(c * self.nflows, i))
+                threads.append(t)
+                t.start()
+
+            # Wait for all threads to finish; check the predicate while holding the condition
+            # lock, otherwise a notification could slip in between the check and the wait
+            with self.cond:
+                while self.threads_done < self.nthreads:
+                    self.cond.wait()
+
+            with self.print_lock:
+                print '    Overall success rate:  %.2f, Overall rate: %.2f' % (
+                    self.ok_rate.value, self.total_rate.value)
+                self.threads_done = 0
+
+            self.ok_rate.value = 0
+            self.total_rate.value = 0
+
+
+    def add_blaster(self):
+        self.run_cycle(self.add_flows)
+
+    def delete_blaster(self):
+        self.run_cycle(self.delete_flows)
+
+    def get_total_flows(self):
+        return sum(len(self.flows[key]) for key in self.flows.keys())
+
+    def get_ok_flows(self):
+        return self.ok_total
+
+
+if __name__ == "__main__":
+
+    JSON_FLOW_MOD1 = '''{
+        "flow-node-inventory:flow": [
+            {
+                "flow-node-inventory:cookie": %d,
+                "flow-node-inventory:cookie_mask": 65535,
+                "flow-node-inventory:flow-name": "%s",
+                "flow-node-inventory:hard-timeout": %d,
+                "flow-node-inventory:id": "%s",
+                "flow-node-inventory:idle-timeout": %d,
+                "flow-node-inventory:installHw": false,
+                "flow-node-inventory:instructions": {
+                    "flow-node-inventory:instruction": [
+                        {
+                            "flow-node-inventory:apply-actions": {
+                                "flow-node-inventory:action": [
+                                    {
+                                        "flow-node-inventory:drop-action": {},
+                                        "flow-node-inventory:order": 0
+                                    }
+                                ]
+                            },
+                            "flow-node-inventory:order": 0
+                        }
+                    ]
+                },
+                "flow-node-inventory:match": {
+                    "flow-node-inventory:ipv4-destination": "%s/32",
+                    "flow-node-inventory:ethernet-match": {
+                        "flow-node-inventory:ethernet-type": {
+                            "flow-node-inventory:type": 2048
+                        }
+                    }
+                },
+                "flow-node-inventory:priority": 2,
+                "flow-node-inventory:strict": false,
+                "flow-node-inventory:table_id": 0
+            }
+        ]
+    }'''
+
+
+    parser = argparse.ArgumentParser(description='Flow programming performance test: first adds flows to the config '
+                                                 'tree and then deletes them, as specified by optional parameters.')
+
+    parser.add_argument('--host', default='127.0.0.1',
+                        help='Host where odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--port', default='8181',
+                        help='Port on which odl\'s RESTCONF is listening (default is 8181)')
+    parser.add_argument('--flows', type=int, default=10,
+                        help='Number of flow add/delete requests to send in each cycle; default 10')
+    parser.add_argument('--cycles', type=int, default=1,
+                        help='Number of flow add/delete cycles to send in each thread; default 1')
+    parser.add_argument('--threads', type=int, default=1,
+                        help='Number of request worker threads; default=1. '
+                             'Each thread will add/delete <flows> flows.')
+    parser.add_argument('--nodes', type=int, default=16,
+                        help='Number of nodes if mininet is not connected; default=16. If mininet is connected, '
+                             'flows will be evenly distributed (programmed) into connected nodes.')
+    parser.add_argument('--delay', type=int, default=0,
+                        help='Time to wait between the add and delete cycles; default=0')
+    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
+                        help='Delete all added flows one by one, benchmark delete '
+                             'performance.')
+    parser.add_argument('--no-delete', dest='delete', action='store_false',
+                        help='Do not perform the delete cycle.')
+    parser.add_argument('--no-auth', dest='auth', action='store_false', default=False,
+                        help="Do not use authenticated access to REST (default)")
+    parser.add_argument('--auth', dest='auth', action='store_true',
+                        help="Use authenticated access to REST (username: 'admin', password: 'admin').")
+    parser.add_argument('--startflow', type=int, default=0,
+                        help='The starting Flow ID; default=0')
+
+    in_args = parser.parse_args()
+
+    fct = FlowConfigBlaster(in_args.host, in_args.port, in_args.cycles, in_args.threads, in_args.nodes,
+                            in_args.flows, in_args.startflow, in_args.auth, JSON_FLOW_MOD1)
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> are added from each thread
+    fct.add_blaster()
+
+    print '\n*** Total flows added: %s' % fct.get_total_flows()
+    print '    HTTP[OK] results:  %d\n' % fct.get_ok_flows()
+
+    if in_args.delay > 0:
+        print '*** Waiting for %d seconds before the delete cycle ***\n' % in_args.delay
+        time.sleep(in_args.delay)
+
+    # Run through <cycles>, where <threads> are started in each cycle and <flows> previously added in an add cycle are
+    # deleted in each thread
+    if in_args.delete:
+        fct.delete_blaster()
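The flow-ID arithmetic shared by add_flows() and delete_flows() (tid * (ncycles * nflows) + flow + start_flow + self.startflow) gives every thread a disjoint, repeatable ID range, which is what lets the delete cycle recompute exactly the IDs the add cycle programmed. A worked example with 2 threads, 2 cycles and 3 flows per cycle (startflow=0):

    # thread 0: cycle 0 -> IDs 0,1,2    cycle 1 -> IDs 3,4,5
    # thread 1: cycle 0 -> IDs 6,7,8    cycle 1 -> IDs 9,10,11
    for tid in range(2):
        for cycle in range(2):
            for flow in range(3):
                print tid, cycle, tid * (2 * 3) + flow + cycle * 3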
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
index 57efa6ba37948d0a6d03bdfa262e77172b201bcc..b69f901f1fd52f69015c1f1753a9318a5e9dfbca 100644
+#!/usr/bin/python
 __author__ = "Jan Medved"
 __copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
 __license__ = "New-style BSD"
 __email__ = "jmedved@cisco.com"
 
-from operator import itemgetter, attrgetter
 import argparse
 import requests
-# import time
-# import threading
 import re
 import json
 
-INVENTORY_URL = 'restconf/%s/opendaylight-inventory:nodes'
-hdr = {'Accept': 'application/json'}
-
-# Inventory totals
-reported_flows = 0
-found_flows = 0
-nodes = 0
-
-def crawl_flows(flows):
-    global found_flows
-
-    found_flows += len(flows)
-    if in_args.plevel > 1:
-        print '             Flows found: %d' % len(flows)
-        if in_args.plevel > 2:
-            for f in flows:
-                s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
-                # s = s.replace('{\n', '')
-                # s = s.replace('}', '')
-                s = s.strip()
-                s = s.lstrip('{')
-                s = s.rstrip('}')
-                s = s.replace('\n', '\n            ')
-                s = s.lstrip('\n')
-                print "             Flow %s:" % f['flow-node-inventory:id']
-                print s
-
-
-
-def crawl_table(table):
-    global reported_flows
-
-    try:
-        stats = table['opendaylight-flow-table-statistics:flow-table-statistics']
-        active_flows = stats['opendaylight-flow-table-statistics:active-flows']
-
-        if active_flows > 0:
-            reported_flows += active_flows
-            if in_args.plevel > 1:
-                print '        Table %s:' % table['flow-node-inventory:id']
-                s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
-                s = s.replace('{\n', '')
-                s = s.replace('}', '')
-                print s
-    except:
-        print "        Stats for Table '%s' not available." %  \
-              table['flow-node-inventory:id']
-
-    try:
-        flows_in_table = table['flow-node-inventory:flow']
-        crawl_flows(flows_in_table)
-    except(KeyError):
-        pass
-
-
-
-def crawl_node(node):
-    global nodes
-    nodes = nodes + 1
-    if in_args.plevel > 1:
-        print "\nNode '%s':" %(node['id'])
-    elif in_args.plevel > 0:
-        print "%s" %(node['id'])
-
-    try:
-        tables = node['flow-node-inventory:table']
-        if in_args.plevel > 1:
-            print '    Tables: %d' % len(tables)
-
-        for t in tables:
-            crawl_table(t)
-    except:
-        print '    Data for tables not available.'
-
-#    print json.dumps(tables, sort_keys=True, indent=4, separators=(',', ': '))
-
-def crawl_inventory(url):
-    s = requests.Session()
-    r = s.get(url, headers=hdr, stream=False)
-
-    if (r.status_code == 200):
+
+class InventoryCrawler(object):
+    reported_flows = 0
+    found_flows = 0
+    nodes = 0
+
+    INVENTORY_URL = 'restconf/%s/opendaylight-inventory:nodes'
+    hdr = {'Accept': 'application/json'}
+    OK, ERROR = range(2)
+    table_stats_unavailable = 0
+    table_stats_fails = []
+
+    def __init__(self, host, port, plevel, datastore, auth, debug):
+        self.url = 'http://' + host + ":" + port + '/' + self.INVENTORY_URL % datastore
+        self.plevel = plevel
+        self.auth = auth
+        self.debug = debug
+
+
+    def crawl_flows(self, flows):
+        """
+        Collects and prints summary information for all flows in a table
+        """
+        self.found_flows += len(flows)
+        if self.plevel > 1:
+            print '             Flows found: %d\n' % len(flows)
+            if self.plevel > 2:
+                for f in flows:
+                    s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
+                    # s = s.replace('{\n', '')
+                    # s = s.replace('}', '')
+                    s = s.strip()
+                    s = s.lstrip('{')
+                    s = s.rstrip('}')
+                    s = s.replace('\n', '\n            ')
+                    s = s.lstrip('\n')
+                    print "             Flow %s:" % f['id']
+                    print s
+
+
+    def crawl_table(self, table):
+        """
+        Collects and prints summary statistics for a single table and crawls the flows it contains; the print
+        level (plevel) controls how much detail is printed.
+        """
+        try:
+            stats = table['opendaylight-flow-table-statistics:flow-table-statistics']
+            active_flows = int(stats['active-flows'])
+
+            if active_flows > 0:
+                self.reported_flows += active_flows
+                if self.plevel > 1:
+                    print '        Table %s:' % table['id']
+                    s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
+                    s = s.replace('{\n', '')
+                    s = s.replace('}', '')
+                    print s
+        except KeyError:
+            if self.plevel > 1:
+                print "        Stats for Table '%s' not available." % table['id']
+            self.table_stats_unavailable += 1
+
         try:
-            inv = json.loads(r.content)['nodes']['node']
-            sinv = []
-            for n in range(len(inv)):
-                if re.search('openflow', inv[n]['id']) != None:
-                    sinv.append(inv[n])
+            flows_in_table = table['flow']
+            self.crawl_flows(flows_in_table)
+        except KeyError:
+            pass
+
+
+    def crawl_node(self, node):
+        """
+        Collects and prints summary information about a single node
+        """
+        self.table_stats_unavailable = 0
+        self.nodes += 1
+
+        if self.plevel > 1:
+            print "\nNode '%s':" % (node['id'])
+        elif self.plevel > 0:
+            print "%s" % (node['id'])
+
+        try:
+            tables = node['flow-node-inventory:table']
+            if self.plevel > 1:
+                print '    Tables: %d' % len(tables)
+
+            for t in tables:
+                self.crawl_table(t)
+
+            if self.table_stats_unavailable > 0:
+                self.table_stats_fails.append(node['id'])
+
+        except KeyError:
+            if self.plevel > 1:
+                print '    Data for tables not available.'
 
-#            sinv = sorted(sinv, key=lambda k: int(k['id'].split(':')[-1]))
+
+    def crawl_inventory(self):
+        """
+        Collects and prints summary information about all openflow nodes in a data store (either operational or config)
+        """
+        self.found_flows = 0
+        self.reported_flows = 0
+        self.table_stats_unavailable = 0
+        self.table_stats_fails = []
+
+        s = requests.Session()
+        if not self.auth:
+            r = s.get(self.url, headers=self.hdr, stream=False)
+        else:
+            r = s.get(self.url, headers=self.hdr, stream=False, auth=('admin', 'admin'))
+
+        if r.status_code == 200:
             try:
+                inv = json.loads(r.content)['nodes']['node']
+                sinv = []
+                for n in range(len(inv)):
+                    if re.search('openflow', inv[n]['id']) is not None:
+                        sinv.append(inv[n])
+
                 sinv = sorted(sinv, key=lambda k: int(re.findall('\d+', k['id'])[0]))
+
                 for n in range(len(sinv)):
-                    crawl_node(sinv[n])
-            except:
-                print 'Fuck! %s' % sinv[n]['id']
+                    try:
+                        self.crawl_node(sinv[n])
+                    except:
+                        print 'Cannot crawl %s' % sinv[n]['id']
 
-        except(KeyError):
-            print 'Could not retrieve inventory, response not in JSON format'
-    else:
-        print 'Could not retrieve inventory, HTTP error %d' % r.status_code
+            except KeyError:
+                print 'Could not retrieve inventory, response not in JSON format'
+        else:
+            print 'Could not retrieve inventory, HTTP error %d' % r.status_code
 
+        s.close()
 
 
-if __name__ == "__main__":
+    def set_plevel(self, plevel):
+        self.plevel = plevel
 
+
+
+
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Restconf test program')
     parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
-                        'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--odlport', default='8080', help='port on '
-                        'which odl\'s RESTCONF is listening (default is 8080)')
-    parser.add_argument('--plevel', type=int, default=0, help='Print level: '
-                        '0 - Summary (just stats); 1 - Node names; 2 - Node details; '
-                         '3 - Flow details')
-    parser.add_argument('--datastore', choices=['operational', 'config'], \
+                                                               'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8181', help='port on '
+                                                          'which odl\'s RESTCONF is listening (default is 8181)')
+    parser.add_argument('--plevel', type=int, default=0,
+                        help='Print Level: 0 - Summary (stats only); 1 - Node names; 2 - Node details; '
+                             '3 - Flow details')
+    parser.add_argument('--datastore', choices=['operational', 'config'],
                         default='operational', help='Which data store to crawl; '
-                        'default operational')
+                                                    'default operational')
+    parser.add_argument('--no-auth', dest='auth', action='store_false', default=False,
+                        help="Do not use authenticated access to REST (default)")
+    parser.add_argument('--auth', dest='auth', action='store_true',
+                        help="Use authenticated access to REST (username: 'admin', password: 'admin').")
+    parser.add_argument('--debug', dest='debug', action='store_true', default=False,
+                        help="List nodes that have not provided proper statistics data")
 
     in_args = parser.parse_args()
 
-    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + \
-          INVENTORY_URL % in_args.datastore
-
-    print "Crawling '%s'" % url
+    ic = InventoryCrawler(in_args.odlhost, in_args.odlport, in_args.plevel, in_args.datastore, in_args.auth,
+                          in_args.debug)
 
-    crawl_inventory(url)
+    print "Crawling '%s'" % ic.url
+    ic.crawl_inventory()
 
     print '\nTotals:'
-    print '    Nodes:          %d' % nodes
-    print '    Reported flows: %d' % reported_flows
-    print '    Found flows:    %d' % found_flows
+    print '    Nodes:          %d' % ic.nodes
+    print '    Reported flows: %d' % ic.reported_flows
+    print '    Found flows:    %d' % ic.found_flows
+
+    if in_args.debug:
+        n_missing = len(ic.table_stats_fails)
+        if n_missing > 0:
+            print '\nMissing table stats (%d nodes):' % n_missing
+            print "%s\n" % ", ".join(ic.table_stats_fails)
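Because the crawler is now a class, its results can be consumed programmatically rather than scraped from stdout; the --debug bookkeeping, for instance, is just an attribute. A minimal sketch (host, port and auth assumed):

    from inventory_crawler import InventoryCrawler

    ic = InventoryCrawler('127.0.0.1', '8181', 0, 'operational', True, True)
    ic.crawl_inventory()
    if ic.table_stats_fails:
        print 'Nodes missing table stats: %s' % ', '.join(ic.table_stats_fails)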