check-in scripts (passed by Jan Medved) related to datastore performance testing

author    Basheeruddin Ahmed <syedbahm@cisco.com>
          Thu, 28 Aug 2014 01:08:45 +0000 (18:08 -0700)
committer Basheeruddin Ahmed <syedbahm@cisco.com>
          Thu, 28 Aug 2014 01:08:45 +0000 (18:08 -0700)

Signed-off-by: Basheeruddin Ahmed <syedbahm@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py [new file with mode: 0644]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py [new file with mode: 0644]
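
The scripts are standalone command-line tools: the first four are driven by
argparse, while the two restconf_* scripts are configured by editing the
constants at the top of the file. A few example invocations (parameter values
are illustrative only; every script defaults to a controller with RESTCONF
listening on 127.0.0.1:8080):

    python flow_config_perf.py --nthreads 5 --ncycles 2 --nflows 1000
    python inventory_crawler.py --datastore operational --plevel 1
    python inventory_perf.py --nthreads 4 --requests 100
    python config_cleanup.py --odlhost 127.0.0.1 --odlport 8080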

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/config_cleanup.py
new file mode 100644 (file)
index 0000000..f48ebc0
--- /dev/null
@@ -0,0 +1,32 @@
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+
+import argparse
+import requests
+
+CONFIGURL = 'restconf/config/opendaylight-inventory:nodes'
+getheaders = {'Accept': 'application/json'}
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='Cleans up the config space')
+    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
+                        'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8080', help='port on '
+                        'which odl\'s RESTCONF is listening (default is 8080)')
+
+    in_args = parser.parse_args()
+
+    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + CONFIGURL
+    s = requests.Session()
+    r = s.delete(url, headers=getheaders)
+
+    if r.status_code != 200:
+        print 'Failed to delete nodes in the config space, code %d' % r.status_code
+    else:
+        print 'Nodes in config space deleted.'
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py
new file mode 100644 (file)
index 0000000..28edfad
--- /dev/null
@@ -0,0 +1,313 @@
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+from random import randrange
+import json
+import argparse
+import requests
+import time
+import threading
+import re
+
+class Counter(object):
+    def __init__(self, start=0):
+        self.lock = threading.Lock()
+        self.value = start
+    def increment(self, value=1):
+        self.lock.acquire()
+        try:
+            self.value = self.value + value
+        finally:
+            self.lock.release()
+
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print ("elapsed time: %f ms" % self.msecs)
+
+
+putheaders = {'content-type': 'application/json'}
+getheaders = {'Accept': 'application/json'}
+# URL templates for flow delete/get/put; the '%d' placeholders are filled
+# with the node id and the flow id for each request
+DELURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+GETURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+PUTURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
+
+INVURL = 'restconf/operational/opendaylight-inventory:nodes'
+N1T0_URL = 'restconf/operational/opendaylight-inventory:nodes/node/openflow:1/table/0'
+
+
+print_lock = threading.Lock()
+threads_done = 0
+
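+# Flow body template for PUT requests; the '%' placeholders are filled, in
+# order, with: cookie, flow-name, hard-timeout, id, idle-timeout and the
+# match metadata (see add_flow() below)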
+JSON_FLOW_MOD1 = '''{
+    "flow-node-inventory:flow": [
+        {
+            "flow-node-inventory:cookie": %d,
+            "flow-node-inventory:cookie_mask": 65535,
+            "flow-node-inventory:flow-name": "%s",
+            "flow-node-inventory:hard-timeout": %d,
+            "flow-node-inventory:id": "%s",
+            "flow-node-inventory:idle-timeout": %d,
+            "flow-node-inventory:installHw": false,
+            "flow-node-inventory:instructions": {
+                "flow-node-inventory:instruction": [
+                    {
+                        "flow-node-inventory:apply-actions": {
+                            "flow-node-inventory:action": [
+                                {
+                                    "flow-node-inventory:dec-nw-ttl": {},
+                                    "flow-node-inventory:order": 0
+                                }
+                            ]
+                        },
+                        "flow-node-inventory:order": 0
+                    }
+                ]
+            },
+            "flow-node-inventory:match": {
+                "flow-node-inventory:metadata": {
+                    "flow-node-inventory:metadata": %d
+                }
+            },
+            "flow-node-inventory:priority": 2,
+            "flow-node-inventory:strict": false,
+            "flow-node-inventory:table_id": 0
+        }
+    ]
+}'''
+
+add_ok_rate = Counter(0.0)
+add_total_rate = Counter(0.0)
+del_ok_rate = Counter(0.0)
+del_total_rate = Counter(0.0)
+
+flows = {}
+
+def add_flow(session, url_template, res, tid, node, flow_id, metadata):
+    flow_data = JSON_FLOW_MOD1 % (tid + flow_id, 'TestFlow-%d' % flow_id, 65000,
+                                  str(flow_id), 65000, metadata)
+    flow_url = url_template % (node, flow_id)
+    r = session.put(flow_url, data=flow_data, headers=putheaders, stream=False )
+
+    try:
+        res[r.status_code] += 1
+    except(KeyError):
+        res[r.status_code] = 1
+
+
+def delete_flow(session, url_template, res, tid, node, flow_id):
+    flow_url = url_template % (node, flow_id)
+    r = session.delete(flow_url, headers=getheaders)
+    try:
+        res[r.status_code] += 1
+    except(KeyError):
+        res[r.status_code] = 1
+
+
+def get_num_nodes(session, inventory_url, default_nodes):
+    """
+    Determines the number of OF nodes in the connected mininet network. If
+    mininet is not connected, the supplied default number of nodes is used.
+    """
+    nodes = default_nodes
+    r = session.get(inventory_url, headers=getheaders, stream=False )
+    if (r.status_code == 200):
+        try:
+            inv = json.loads(r.content)['nodes']['node']
+            nn = 0
+            for n in range(len(inv)):
+                if re.search('openflow', inv[n]['id']) != None:
+                    nn = nn + 1
+            if nn != 0:
+                nodes = nn
+        except(KeyError):
+            pass
+
+    return nodes
+
+def add_flows(put_url, nnodes, nflows, start_flow, tid, cond):
+    """
+    Adds flows into the ODL config space.
+    """
+    global threads_done
+
+    add_res = {}
+    add_res[200] = 0
+
+    s = requests.Session()
+
+    nnodes = get_num_nodes(s, inv_url, nnodes)
+
+    with print_lock:
+        print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, nflows, nnodes)
+
+    with Timer() as t:
+        for flow in range(nflows):
+            node_id = randrange(1, nnodes+1)
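+            # Each thread allocates flow ids from its own 100000-wide range,
+            # so ids from different threads and cycles do not collide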
+            flow_id = tid*100000 + flow + start_flow
+            flows[tid][flow_id] = node_id
+            add_flow(s, put_url, add_res, tid, node_id, flow_id, flow*2+1)
+
+    add_time = t.secs
+    add_ok_rate_t = add_res[200]/add_time
+    add_total_rate_t = sum(add_res.values())/add_time
+
+    add_ok_rate.increment(add_ok_rate_t)
+    add_total_rate.increment(add_total_rate_t)
+
+    with print_lock:
+        print '    Thread %d: ' % tid
+        print '        Add time: %.2f,' % add_time
+        print '        Add success rate:  %.2f, Add total rate: %.2f' % \
+                        (add_ok_rate_t, add_total_rate_t)
+        print '        Add Results: ',
+        print add_res
+        threads_done = threads_done + 1
+
+    s.close()
+
+    with cond:
+        cond.notifyAll()
+
+
+def delete_flows(del_url, nnodes, nflows, start_flow, tid, cond):
+    """
+    Deletes the flows that were added by 'add_flows()' from the ODL
+    config space.
+    """
+    global threads_done
+
+    del_res = {}
+    del_res[200] = 0
+
+    s = requests.Session()
+    nnodes = get_num_nodes(s, inv_url, nnodes)
+
+    with print_lock:
+        print 'Thread %d: Deleting %d flows on %d nodes' % (tid, nflows, nnodes)
+
+    with Timer() as t:
+        for flow in range(nflows):
+            flow_id = tid*100000 + flow + start_flow
+            delete_flow(s, del_url, del_res, tid, flows[tid][flow_id], flow_id)
+
+    del_time = t.secs
+
+    del_ok_rate_t = del_res[200]/del_time
+    del_total_rate_t = sum(del_res.values())/del_time
+
+    del_ok_rate.increment(del_ok_rate_t)
+    del_total_rate.increment(del_total_rate_t)
+
+    with print_lock:
+        print '    Thread %d: ' % tid
+        print '        Delete time: %.2f,' % del_time
+        print '        Delete success rate:  %.2f, Delete total rate: %.2f' % \
+                        (del_ok_rate_t, del_total_rate_t)
+        print '        Delete Results: ',
+        print del_res
+        threads_done = threads_done + 1
+
+    s.close()
+
+    with cond:
+        cond.notifyAll()
+
+
+def driver(function, ncycles, nthreads, nnodes, nflows, url, cond, ok_rate, total_rate):
+    """
+    The top-level driver function that drives the execution of the flow-add and
+    flow-delete tests.
+    """
+    global threads_done
+
+    for c in range(ncycles):
+        with print_lock:
+            print '\nCycle %d:' % c
+
+        threads = []
+        for i in range(nthreads):
+            t = threading.Thread(target=function,
+                                 args=(url, nnodes, nflows, c*nflows, i, cond))
+            threads.append(t)
+            t.start()
+
+        # Wait for all threads to finish; wait with a timeout so that a
+        # notification fired before we start waiting cannot hang the loop
+        while threads_done < nthreads:
+            with cond:
+                cond.wait(1)
+
+        with print_lock:
+            print '    Overall success rate:  %.2f, Overall rate: %.2f' % \
+                  (ok_rate.value, total_rate.value)
+            threads_done = 0
+
+        ok_rate.value = 0
+        total_rate.value = 0
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Flow programming performance test: '
+                                     'first adds flows to and then deletes them from '
+                                     'the config tree, as specified by optional parameters.')
+    parser.add_argument('--odlhost', default='127.0.0.1', help='Host where '
+                        'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8080', help='Port on '
+                        'which odl\'s RESTCONF is listening (default is 8080)')
+    parser.add_argument('--nflows', type=int, default=10, help='Number of '
+                        'flow add/delete requests to send in each cycle; default 10')
+    parser.add_argument('--ncycles', type=int, default=1, help='Number of '
+                        'flow add/delete cycles to send in each thread; default 1')
+    parser.add_argument('--nthreads', type=int, default=1,
+                        help='Number of request worker threads, default=1. '
+                        'Each thread will add/delete nflows.')
+    parser.add_argument('--nnodes', type=int, default=16,
+                        help='Number of nodes if mininet is not connected, default=16. '
+                        'If mininet is connected, flows will be evenly distributed '
+                        '(programmed) into connected nodes.')
+    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
+                        help='Delete all added flows one by one, benchmark delete '
+                        'performance.')
+    parser.add_argument('--no-delete', dest='delete', action='store_false',
+                        help='Add flows and leave them in the config data store.')
+
+    in_args = parser.parse_args()
+
+    put_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + PUTURL
+    del_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + DELURL
+    get_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + GETURL
+    inv_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + INVURL
+
+    cond = threading.Condition()
+
+    # Initialize the flows array
+    for i in range(in_args.nthreads):
+        flows[i] = {}
+
+    # Run through ncycles; in each cycle, nthreads threads are started and
+    # each thread adds nflows flows
+    driver(add_flows, in_args.ncycles, in_args.nthreads, in_args.nnodes, \
+           in_args.nflows, put_url, cond, add_ok_rate, add_total_rate)
+
+
+    # Run through ncycles again; in each cycle, nthreads threads are started
+    # and each thread deletes the nflows flows it added
+    if in_args.delete:
+        driver(delete_flows, in_args.ncycles, in_args.nthreads, in_args.nnodes, \
+               in_args.nflows, del_url, cond, del_ok_rate, del_total_rate)
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_crawler.py
new file mode 100644 (file)
index 0000000..57efa6b
--- /dev/null
@@ -0,0 +1,145 @@
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+import argparse
+import requests
+import re
+import json
+
+INVENTORY_URL = 'restconf/%s/opendaylight-inventory:nodes'
+hdr = {'Accept': 'application/json'}
+
+# Inventory totals
+reported_flows = 0
+found_flows = 0
+nodes = 0
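+# ('reported_flows' sums the per-table active-flows statistics; 'found_flows'
+# counts the flow entries actually present in the tables' flow lists)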
+
+def crawl_flows(flows):
+    global found_flows
+
+    found_flows += len(flows)
+    if in_args.plevel > 1:
+        print '             Flows found: %d' % len(flows)
+        if in_args.plevel > 2:
+            for f in flows:
+                s = json.dumps(f, sort_keys=True, indent=4, separators=(',', ': '))
+                # s = s.replace('{\n', '')
+                # s = s.replace('}', '')
+                s = s.strip()
+                s = s.lstrip('{')
+                s = s.rstrip('}')
+                s = s.replace('\n', '\n            ')
+                s = s.lstrip('\n')
+                print "             Flow %s:" % f['flow-node-inventory:id']
+                print s
+
+
+
+def crawl_table(table):
+    global reported_flows
+
+    try:
+        stats = table['opendaylight-flow-table-statistics:flow-table-statistics']
+        active_flows = stats['opendaylight-flow-table-statistics:active-flows']
+
+        if active_flows > 0:
+            reported_flows += active_flows
+            if in_args.plevel > 1:
+                print '        Table %s:' % table['flow-node-inventory:id']
+                s = json.dumps(stats, sort_keys=True, indent=12, separators=(',', ': '))
+                s = s.replace('{\n', '')
+                s = s.replace('}', '')
+                print s
+    except(KeyError):
+        print "        Stats for Table '%s' not available." % \
+              table['flow-node-inventory:id']
+
+    try:
+        flows_in_table = table['flow-node-inventory:flow']
+        crawl_flows(flows_in_table)
+    except(KeyError):
+        pass
+
+
+
+def crawl_node(node):
+    global nodes
+    nodes = nodes + 1
+    if in_args.plevel > 1:
+        print "\nNode '%s':" %(node['id'])
+    elif in_args.plevel > 0:
+        print "%s" %(node['id'])
+
+    try:
+        tables = node['flow-node-inventory:table']
+        if in_args.plevel > 1:
+            print '    Tables: %d' % len(tables)
+
+        for t in tables:
+            crawl_table(t)
+    except(KeyError):
+        print '    Data for tables not available.'
+
+#    print json.dumps(tables, sort_keys=True, indent=4, separators=(',', ': '))
+
+def crawl_inventory(url):
+    s = requests.Session()
+    r = s.get(url, headers=hdr, stream=False)
+
+    if (r.status_code == 200):
+        try:
+            inv = json.loads(r.content)['nodes']['node']
+            sinv = []
+            for n in range(len(inv)):
+                if re.search('openflow', inv[n]['id']) != None:
+                    sinv.append(inv[n])
+
+#            sinv = sorted(sinv, key=lambda k: int(k['id'].split(':')[-1]))
+            try:
+                sinv = sorted(sinv, key=lambda k: int(re.findall('\d+', k['id'])[0]))
+                for n in range(len(sinv)):
+                    crawl_node(sinv[n])
+            except:
+                print 'Could not process inventory node %s' % sinv[n]['id']
+
+        except(KeyError, ValueError):
+            print 'Could not retrieve inventory: unexpected or non-JSON response'
+    else:
+        print 'Could not retrieve inventory, HTTP error %d' % r.status_code
+
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='Inventory crawler: counts '
+                                     'nodes and flows in the inventory')
+    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
+                        'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8080', help='port on '
+                        'which odl\'s RESTCONF is listening (default is 8080)')
+    parser.add_argument('--plevel', type=int, default=0, help='Print level: '
+                        '0 - Summary (just stats); 1 - Node names; 2 - Node details; '
+                         '3 - Flow details')
+    parser.add_argument('--datastore', choices=['operational', 'config'], \
+                        default='operational', help='Which data store to crawl; '
+                        'default operational')
+
+    in_args = parser.parse_args()
+
+    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + \
+          INVENTORY_URL % in_args.datastore
+
+    print "Crawling '%s'" % url
+
+    crawl_inventory(url)
+
+    print '\nTotals:'
+    print '    Nodes:          %d' % nodes
+    print '    Reported flows: %d' % reported_flows
+    print '    Found flows:    %d' % found_flows
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/inventory_perf.py
new file mode 100644 (file)
index 0000000..c7582a0
--- /dev/null
@@ -0,0 +1,136 @@
+__author__ = "Jan Medved"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__email__ = "jmedved@cisco.com"
+
+import argparse
+import requests
+import time
+import threading
+
+class Counter(object):
+    def __init__(self, start=0):
+        self.lock = threading.Lock()
+        self.value = start
+    def increment(self, value=1):
+        self.lock.acquire()
+        try:
+            self.value = self.value + value
+        finally:
+            self.lock.release()
+
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print ("elapsed time: %f ms" % self.msecs)
+
+# Initialize the totals over all threads
+total_requests = Counter(0)
+total_req_rate = Counter(0.0)
+
+total_mbytes = Counter(0.0)
+total_mb_rate = Counter(0.0)
+
+putheaders = {'content-type': 'application/json'}
+getheaders = {'Accept': 'application/json'}
+
+INVENTORY_URL = 'http://localhost:8080/restconf/operational/opendaylight-inventory:nodes'
+N1T0_URL = 'http://localhost:8080/restconf/operational/opendaylight-inventory:nodes/node/openflow:1/table/0'
+
+num_threads = 1
+
+print_lock = threading.Lock()
+
+
+def get_inventory(tnum, url, hdrs, rnum, cond):
+    total_len = float(0)
+    results = {}
+
+    with print_lock:
+        print 'Thread %d: Getting %s' % (tnum, url)
+
+    s = requests.Session()
+    with Timer() as t:
+        for i in range(rnum):
+            r = s.get(url, headers=hdrs, stream=False )
+            total_len += len(r.content)
+
+            try:
+                results[r.status_code] += 1
+            except(KeyError):
+                results[r.status_code] = 1
+
+    total = sum(results.values())
+    rate = total/t.secs
+    total_requests.increment(total)
+    total_req_rate.increment(rate)
+
+    mbytes = total_len / (1024*1024)
+    mrate = mbytes/t.secs
+    total_mbytes.increment(mbytes)
+    total_mb_rate.increment(mrate)
+
+    with print_lock:
+        print '\nThread %d: ' % tnum
+        print '    Elapsed time: %.2f,' % t.secs
+        print '    Requests: %d, Requests/sec: %.2f' % (total, rate)
+        print '    Volume: %.2f MB, Rate: %.2f MByte/s' % (mbytes, mrate)
+        print '    Results: ',
+        print results
+
+    with cond:
+        cond.notifyAll()
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser(description='RESTCONF GET performance test')
+    parser.add_argument('--odlhost', default='127.0.0.1', help='host where '
+                        'odl controller is running (default is 127.0.0.1)')
+    parser.add_argument('--odlport', default='8080', help='port on '
+                        'which odl\'s RESTCONF is listening (default is 8080)')
+    parser.add_argument('--requests', type=int, default=10, help='number of '
+                        'requests each worker thread sends (default is 10)')
+    parser.add_argument('--url', default='restconf/operational/opendaylight-inventory:nodes',
+                        help='Url to send.')
+    parser.add_argument('--nthreads', type=int, default=1,
+                        help='Number of request worker threads, default=1')
+    in_args = parser.parse_args()
+
+    url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + in_args.url
+
+    threads = []
+    nthreads = int(in_args.nthreads)
+    cond = threading.Condition()
+
+    for i in range(nthreads):
+        t = threading.Thread(target=get_inventory,
+                             args=(i,url, getheaders, int(in_args.requests), cond))
+        threads.append(t)
+        t.start()
+
+    # Wait for all worker threads to finish; a plain join avoids missing a
+    # notification sent before the main thread starts waiting
+    for t in threads:
+        t.join()
+
+    print '\nAggregate requests: %d, Aggregate requests/sec: %.2f' % (total_requests.value,
+                                                                    total_req_rate.value)
+    print 'Aggregate Volume: %.2f MB, Aggregate Rate: %.2f MByte/s' % (total_mbytes.value,
+                                                                       total_mb_rate.value)
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py
new file mode 100644 (file)
index 0000000..d22bc35
--- /dev/null
@@ -0,0 +1,126 @@
+__author__ = "Reinaldo Penno"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__version__ = "0.1"
+__email__ = "repenno@cisco.com"
+__status__ = "alpha"
+
+
+#####
+# Incrementally PUTs more and more list elements, up to numputreq,
+# while computing the request rate and the number of successful requests.
+#
+# Then measures the GET request rate, up to numgetreq requests,
+# while computing the number of successful requests.
+#
+# With the default values, ODL is estimated to consume about
+# 1.5GB of heap memory.
+####
+
+
+import requests
+import time
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print ("elapsed time: %f ms" % self.msecs)
+
+# Parametrized PUT body for the incremental list elements
+JSONPUT = """
+{
+  "service-function": [
+    {
+      "ip-mgmt-address": "20.0.0.11",
+      "type": "service-function:napt44",
+      "name": "%d"
+    }
+  ]
+}"""
+
+putheaders = {'content-type': 'application/json'}
+getheaders = {'Accept': 'application/json'}
+# ODL IP:port
+ODLIP   = "127.0.0.1:8080"
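+# (edit ODLIP above to point at your controller; this script takes no
+# command-line arguments)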
+# We first delete all existing service functions
+DELURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
+GETURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/service-function/%d/"
+# Incremental PUT. This URL is for a list element
+PUTURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/service-function/%d/"
+
+# You probably need to adjust this number based on your OS constraints.
+# Maximum number of incremental PUT list elements
+numputreq = 1000000
+# Maximum number of GET requests
+numgetreq = 10000
+# Report PUT progress every numputstep requests
+numputstep = 1000
+# Report GET progress every numgetstep requests
+numgetstep = 1000
+
+# Incrementally PUT list elements up to numputreq
+def putperftest():
+    s = requests.Session()
+    print ("Starting PUT Performance. Total of %d requests\n" % numputreq)
+    for numreq in range(0, numputreq, numputstep): 
+        success = 0      
+        with Timer() as t:
+            for i in range(numreq, numreq + numputstep):
+                r = s.put((PUTURL % i),data = (JSONPUT % i), headers=putheaders, stream=False )
+                if (r.status_code == 200):
+                    success+=1
+        print ("=> %d elapsed requests" % (numreq + numputstep))
+        print ("=> %d requests/s in the last %d reqs" % ((numputstep)/t.secs, numputstep))
+        print ("=> %d successful PUT requests in the last %d reqs " % (success, numputstep))
+        print ("\n")
+
+# Delete all service functions
+def delallsf():
+    print ("Deleting all Service Functions")
+    r = requests.delete(DELURL, headers=getheaders)
+    # 500 is treated as success, presumably returned when there is nothing to delete
+    if (r.status_code == 200) or (r.status_code == 500):
+        print ("Deleted all Service Functions \n")
+        return 0
+    else:
+        print ("Delete Failed \n")
+        exit()
+
+# Retrieve list elements 
+def getperftest():
+    s = requests.Session()
+    print ("Starting GET Performance. Total of %d requests \n" % numgetreq)
+    for numreq in range(0, numgetreq, numgetstep): 
+        success = 0      
+        with Timer() as t:
+            for i in range(numreq, numreq + numgetstep):
+                r = s.get((GETURL % i), stream=False )
+                if (r.status_code == 200):
+                    success+=1
+        print ("=> %d elapsed requests" % (numreq + numgetstep))
+        print ("=> %d requests/s in the last %d reqs" % ((numgetstep)/t.secs, numgetstep))
+        print ("=> %d successful GET requests in the last %d reqs " % (success, numgetstep))
+        print ("\n")
+
+
+if __name__ == "__main__":
+    delallsf()
+    putperftest()
+    getperftest()
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py
new file mode 100644 (file)
index 0000000..1a06118
--- /dev/null
@@ -0,0 +1,111 @@
+__author__ = "Reinaldo Penno"
+__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
+__license__ = "New-style BSD"
+__version__ = "0.1"
+__email__ = "repenno@cisco.com"
+__status__ = "alpha"
+
+
+import requests
+import time
+
+class Timer(object):
+    def __init__(self, verbose=False):
+        self.verbose = verbose
+
+    def __enter__(self):
+        self.start = time.time()
+        return self
+
+    def __exit__(self, *args):
+        self.end = time.time()
+        self.secs = self.end - self.start
+        self.msecs = self.secs * 1000  # millisecs
+        if self.verbose:
+            print ("elapsed time: %f ms" % self.msecs)
+
+# Parametrized PUT body: the whole list containing a single element.
+JSONPUT = """
+{
+  "service-functions": {
+    "service-function": [
+      {
+        "ip-mgmt-address": "20.0.0.10",
+        "type": "dpi",
+        "name": "%d"
+      }
+    ]
+  }
+}"""
+
+putheaders = {'content-type': 'application/json'}
+getheaders = {'Accept': 'application/json'}
+ODLIP   = "127.0.0.1:8080"
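+# (edit ODLIP above to point at your controller; this script takes no
+# command-line arguments)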
+DELURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
+GETURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
+PUTURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
+
+# You probably need to adjust these numbers based on your OS constraints.
+# Number of PUT requests (this script performs a single PUT of the whole list)
+numputreq = 1
+# Maximum number of GET requests
+numgetreq = 100000
+# Report PUT progress every numputstep requests
+numputstep = 1
+# Report GET progress every numgetstep requests
+numgetstep = 1000
+
+
+def getperftest():
+    s = requests.Session()
+    print ("Starting GET Performance. Total of %d requests \n" % numgetreq)
+    for numreq in range(0, numgetreq, numgetstep): 
+        success = 0      
+        with Timer() as t:
+            for i in range(numreq, numreq + numgetstep):
+                r = s.get(GETURL, stream=False )
+                if (r.status_code == 200):
+                    success+=1
+        print ("=> %d elapsed requests" % (numreq + numgetstep))
+        print ("=> %d requests/s in the last %d reqs" % ((numgetstep)/t.secs, numgetstep))
+        print ("=> %d successful GET requests in the last %d reqs " % (success, numgetstep))
+        print ("\n")
+
+# With the default parameters, performs a single PUT. Always overwrites existing elements.
+def putperftest():
+    s = requests.Session()
+    print ("Starting PUT Performance. Total of %d requests\n" % numputreq)
+    for numreq in range(0, numputreq, numputstep): 
+        success = 0      
+        with Timer() as t:
+            for i in range(numreq, numreq + numputstep):
+                r = s.put(PUTURL, data = (JSONPUT % i), headers=putheaders, stream=False )
+                if (r.status_code == 200):
+                    success+=1
+        print ("=> %d elapsed requests" % (numreq + numputstep))
+        print ("=> %d requests/s in the last %d reqs" % ((numputstep)/t.secs, numputstep))
+        print ("=> %d successful PUT requests in the last %d reqs " % (success, numputstep))
+        print ("\n")
+
+# Delete all service functions
+def delallsf():
+    print ("Deleting all Service Functions")
+    r = requests.delete(DELURL, headers=getheaders)
+    # 500 is treated as success, presumably returned when there is nothing to delete
+    if (r.status_code == 200) or (r.status_code == 500):
+        print ("Deleted all Service Functions \n")
+        return 0
+    else:
+        print ("Delete Failed \n")
+        exit()
+
+if __name__ == "__main__":
+    delallsf()
+    putperftest()
+    getperftest()