Removing unneeded files
author    Jan Medved <jmedved@cisco.com>
          Thu, 16 Oct 2014 18:11:56 +0000 (11:11 -0700)
committer Jan Medved <jmedved@cisco.com>
          Thu, 16 Oct 2014 18:12:22 +0000 (11:12 -0700)
Change-Id: Iff39b52f5c3c604c40513d05f4b0be7368b8df3c
Signed-off-by: Jan Medved <jmedved@cisco.com>
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py [deleted file]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py [deleted file]
test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py [deleted file]

diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/flow_config_perf.py
deleted file mode 100644 (file)
index 28edfad..0000000
+++ /dev/null
@@ -1,313 +0,0 @@
-__author__ = "Jan Medved"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__license__ = "New-style BSD"
-__email__ = "jmedved@cisco.com"
-
-from random import randrange
-import json
-import argparse
-import requests
-import time
-import threading
-import re
-
-class Counter(object):
-    def __init__(self, start=0):
-        self.lock = threading.Lock()
-        self.value = start
-    def increment(self, value=1):
-        self.lock.acquire()
-        try:
-            self.value = self.value + value
-        finally:
-            self.lock.release()
-
-
-class Timer(object):
-    def __init__(self, verbose=False):
-        self.verbose = verbose
-
-    def __enter__(self):
-        self.start = time.time()
-        return self
-
-    def __exit__(self, *args):
-        self.end = time.time()
-        self.secs = self.end - self.start
-        self.msecs = self.secs * 1000  # millisecs
-        if self.verbose:
-            print ("elapsed time: %f ms" % self.msecs)
-
-
-putheaders = {'content-type': 'application/json'}
-getheaders = {'Accept': 'application/json'}
-# URL templates for flow operations on the ODL config data store;
-# the OpenFlow node id and flow id are filled in per request.
-DELURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
-GETURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
-# Incremental PUT of a single flow (list element)
-PUTURL  = "restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d"
-
-INVURL = 'restconf/operational/opendaylight-inventory:nodes'
-N1T0_URL = 'restconf/operational/opendaylight-inventory:nodes/node/openflow:1/table/0'
-
-
-print_lock = threading.Lock()
-threads_done = 0
-
-JSON_FLOW_MOD1 = '''{
-    "flow-node-inventory:flow": [
-        {
-            "flow-node-inventory:cookie": %d,
-            "flow-node-inventory:cookie_mask": 65535,
-            "flow-node-inventory:flow-name": "%s",
-            "flow-node-inventory:hard-timeout": %d,
-            "flow-node-inventory:id": "%s",
-            "flow-node-inventory:idle-timeout": %d,
-            "flow-node-inventory:installHw": false,
-            "flow-node-inventory:instructions": {
-                "flow-node-inventory:instruction": [
-                    {
-                        "flow-node-inventory:apply-actions": {
-                            "flow-node-inventory:action": [
-                                {
-                                    "flow-node-inventory:dec-nw-ttl": {},
-                                    "flow-node-inventory:order": 0
-                                }
-                            ]
-                        },
-                        "flow-node-inventory:order": 0
-                    }
-                ]
-            },
-            "flow-node-inventory:match": {
-                "flow-node-inventory:metadata": {
-                    "flow-node-inventory:metadata": %d
-                }
-            },
-            "flow-node-inventory:priority": 2,
-            "flow-node-inventory:strict": false,
-            "flow-node-inventory:table_id": 0
-        }
-    ]
-}'''
-
-add_ok_rate = Counter(0.0)
-add_total_rate = Counter(0.0)
-del_ok_rate = Counter(0.0)
-del_total_rate = Counter(0.0)
-
-flows = {}
-
-def add_flow(session, url_template, res, tid, node, flow_id, metadata):
-    flow_data = JSON_FLOW_MOD1 % (tid + flow_id, 'TestFlow-%d' % flow_id, 65000,
-                                  str(flow_id), 65000, metadata)
-    flow_url = url_template % (node, flow_id)
-    r = session.put(flow_url, data=flow_data, headers=putheaders, stream=False )
-
-    try:
-        res[r.status_code] += 1
-    except(KeyError):
-        res[r.status_code] = 1
-
-
-def delete_flow(session, url_template, res, tid, node, flow_id):
-    flow_url = url_template % (node, flow_id)
-    r = session.delete(flow_url, headers=getheaders)
-    try:
-        res[r.status_code] += 1
-    except(KeyError):
-        res[r.status_code] = 1
-
-
-def get_num_nodes(session, inventory_url, default_nodes):
-    """
-    Determines the number of OF nodes in the connected mininet network. If
-    mininet is not connected, the default number of nodes (default_nodes) is used.
-    """
-    nodes = default_nodes
-    r = session.get(inventory_url, headers=getheaders, stream=False )
-    if (r.status_code == 200):
-        try:
-            inv = json.loads(r.content)['nodes']['node']
-            nn = 0
-            for n in range(len(inv)):
-                if re.search('openflow', inv[n]['id']) != None:
-                    nn = nn + 1
-            if nn != 0:
-                nodes = nn
-        except(KeyError):
-            pass
-
-    return nodes
-
-def add_flows(put_url, nnodes, nflows, start_flow, tid, cond):
-    """
-    Adds flows to the ODL config data store from a single worker thread.
-    """
-    global threads_done
-
-    add_res = {}
-    add_res[200] = 0
-
-    s = requests.Session()
-
-    nnodes = get_num_nodes(s, inv_url, nnodes)
-
-    with print_lock:
-        print '    Thread %d:\n        Adding %d flows on %d nodes' % (tid, nflows, nnodes)
-
-    with Timer() as t:
-        for flow in range(nflows):
-            node_id = randrange(1, nnodes+1)
-            flow_id = tid*100000 + flow + start_flow
-            flows[tid][flow_id] = node_id
-            add_flow(s, put_url, add_res, tid, node_id, flow_id, flow*2+1)
-
-    add_time = t.secs
-    add_ok_rate_t = add_res[200]/add_time
-    add_total_rate_t = sum(add_res.values())/add_time
-
-    add_ok_rate.increment(add_ok_rate_t)
-    add_total_rate.increment(add_total_rate_t)
-
-    with print_lock:
-        print '    Thread %d: ' % tid
-        print '        Add time: %.2f,' % add_time
-        print '        Add success rate:  %.2f, Add total rate: %.2f' % \
-                        (add_ok_rate_t, add_total_rate_t)
-        print '        Add Results: ',
-        print add_res
-        threads_done = threads_done + 1
-
-    s.close()
-
-    with cond:
-        cond.notifyAll()
-
-
-def delete_flows(del_url, nnodes, nflows, start_flow, tid, cond):
-    """
-    Deletes the flows from the ODL config data store that were previously
-    added by the 'add_flows()' function.
-    """
-    global threads_done
-
-    del_res = {}
-    del_res[200] = 0
-
-    s = requests.Session()
-    nnodes = get_num_nodes(s, inv_url, nnodes)
-
-    with print_lock:
-        print 'Thread %d: Deleting %d flows on %d nodes' % (tid, nflows, nnodes)
-
-    with Timer() as t:
-        for flow in range(nflows):
-            flow_id = tid*100000 + flow + start_flow
-            delete_flow(s, del_url, del_res, tid, flows[tid][flow_id], flow_id)
-
-    del_time = t.secs
-
-    del_ok_rate_t = del_res[200]/del_time
-    del_total_rate_t = sum(del_res.values())/del_time
-
-    del_ok_rate.increment(del_ok_rate_t)
-    del_total_rate.increment(del_total_rate_t)
-
-    with print_lock:
-        print '    Thread %d: ' % tid
-        print '        Delete time: %.2f,' % del_time
-        print '        Delete success rate:  %.2f, Delete total rate: %.2f' % \
-                        (del_ok_rate_t, del_total_rate_t)
-        print '        Delete Results: ',
-        print del_res
-        threads_done = threads_done + 1
-
-    s.close()
-
-    with cond:
-        cond.notifyAll()
-
-
-def driver(function, ncycles, nthreads, nnodes, nflows, url, cond, ok_rate, total_rate):
-    """
-    Top-level driver that runs the flow-add and flow-delete tests across the
-    configured number of cycles and worker threads.
-    """
-    global threads_done
-
-    for c in range(ncycles):
-        with print_lock:
-            print '\nCycle %d:' % c
-
-        threads = []
-        for i in range(nthreads):
-            t = threading.Thread(target=function,
-                                 args=(url, nnodes, nflows, c*nflows, i, cond))
-            threads.append(t)
-            t.start()
-
-        # Wait for all threads to finish
-        while threads_done < nthreads:
-            with cond:
-                cond.wait()
-
-        with print_lock:
-            print '    Overall success rate:  %.2f, Overall rate: %.2f' % \
-                           (ok_rate.value, total_rate.value)
-            threads_done = 0
-
-        ok_rate.value = 0
-        total_rate.value = 0
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Flow programming performance test: '
-                                     'first adds flows to the config tree and then '
-                                     'deletes them, as specified by the optional parameters.')
-    parser.add_argument('--odlhost', default='127.0.0.1', help='Host where '
-                        'odl controller is running (default is 127.0.0.1)')
-    parser.add_argument('--odlport', default='8080', help='Port on '
-                        'which odl\'s RESTCONF is listening (default is 8080)')
-    parser.add_argument('--nflows', type=int, default=10, help='Number of '
-                        'flow add/delete requests to send in each cycle; default 10')
-    parser.add_argument('--ncycles', type=int, default=1, help='Number of '
-                        'flow add/delete cycles to send in each thread; default 1')
-    parser.add_argument('--nthreads', type=int, default=1,
-                        help='Number of request worker threads, default=1. '
-                        'Each thread will add/delete nflows.')
-    parser.add_argument('--nnodes', type=int, default=16,
-                        help='Number of nodes if mininet is not connected, default=16. '
-                        'If mininet is connected, flows will be evenly distributed '
-                        '(programmed) into connected nodes.')
-    parser.add_argument('--delete', dest='delete', action='store_true', default=True,
-                        help='Delete all added flows one by one, benchmark delete '
-                        'performance.')
-    parser.add_argument('--no-delete', dest='delete', action='store_false',
-                        help='Add flows and leave them in the config data store.')
-
-    in_args = parser.parse_args()
-
-    put_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + PUTURL
-    del_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + DELURL
-    get_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + GETURL
-    inv_url = 'http://' + in_args.odlhost + ":" + in_args.odlport + '/' + INVURL
-
-    cond = threading.Condition()
-
-    # Initialize the flows array
-    for i in range(in_args.nthreads):
-        flows[i] = {}
-
-    # Run through ncycles; in each cycle, nthreads worker threads are started
-    # and each thread adds nflows flows
-    driver(add_flows, in_args.ncycles, in_args.nthreads, in_args.nnodes, \
-           in_args.nflows, put_url, cond, add_ok_rate, add_total_rate)
-
-
-    # Run through ncycles; in each cycle, nthreads worker threads are started
-    # and each thread deletes the flows it added
-    if in_args.delete:
-        driver(delete_flows, in_args.ncycles, in_args.nthreads, in_args.nnodes, \
-               in_args.nflows, del_url, cond, del_ok_rate, del_total_rate)
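
For reference, the flow-programming measurement that the removed flow_config_perf.py performed boils down to a simple pattern: PUT a batch of templated flows into the controller's RESTCONF config store, time the batch, and report the request rate. The sketch below condenses that pattern; the controller address mirrors the script's defaults, while the trimmed flow body, node id, and flow-id range are illustrative assumptions rather than values taken from the deleted file.

    import time
    import requests

    # Assumed controller endpoint (the deleted script's defaults); adjust as needed.
    BASE_URL = 'http://127.0.0.1:8080'
    FLOW_URL = BASE_URL + '/restconf/config/opendaylight-inventory:nodes/node/openflow:%d/table/0/flow/%d'
    PUT_HEADERS = {'Content-Type': 'application/json'}

    # Trimmed-down flow body in the style of the deleted JSON_FLOW_MOD1 template.
    FLOW_BODY = '''{
        "flow-node-inventory:flow": [
            {
                "flow-node-inventory:id": "%d",
                "flow-node-inventory:table_id": 0,
                "flow-node-inventory:priority": 2,
                "flow-node-inventory:match": {
                    "flow-node-inventory:metadata": {"flow-node-inventory:metadata": %d}
                },
                "flow-node-inventory:instructions": {
                    "flow-node-inventory:instruction": [{
                        "flow-node-inventory:order": 0,
                        "flow-node-inventory:apply-actions": {
                            "flow-node-inventory:action": [{
                                "flow-node-inventory:order": 0,
                                "flow-node-inventory:dec-nw-ttl": {}
                            }]
                        }
                    }]
                }
            }
        ]
    }'''

    def add_flow_batch(session, node_id, first_flow, count):
        """PUT `count` flows to one node; return (successes, elapsed seconds)."""
        ok = 0
        start = time.time()
        for flow_id in range(first_flow, first_flow + count):
            body = FLOW_BODY % (flow_id, flow_id * 2 + 1)
            r = session.put(FLOW_URL % (node_id, flow_id), data=body,
                            headers=PUT_HEADERS)
            if r.status_code in (200, 201):  # 201 when the flow is newly created
                ok += 1
        return ok, time.time() - start

    if __name__ == '__main__':
        s = requests.Session()
        ok, secs = add_flow_batch(s, node_id=1, first_flow=1, count=100)
        print('%d/100 successful PUTs, %.1f requests/s' % (ok, 100 / secs))
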
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_incr_put.py
deleted file mode 100644 (file)
index d22bc35..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-__author__ = "Reinaldo Penno"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__license__ = "New-style BSD"
-__version__ = "0.1"
-__email__ = "repenno@cisco.com"
-__status__ = "alpha"
-
-
-#####
-# Incrementally PUT more and more list elements, up to numputreq,
-# while computing the request rate and the number of successful requests.
-#
-# Then measure the GET request rate, up to numgetreq requests,
-# while counting the number of successful requests.
-#
-# With the default values it is estimated that about 1.5 GB of heap
-# memory will be consumed in ODL.
-####
-
-
-import requests
-import time
-
-class Timer(object):
-    def __init__(self, verbose=False):
-        self.verbose = verbose
-
-    def __enter__(self):
-        self.start = time.time()
-        return self
-
-    def __exit__(self, *args):
-        self.end = time.time()
-        self.secs = self.end - self.start
-        self.msecs = self.secs * 1000  # millisecs
-        if self.verbose:
-            print ("elapsed time: %f ms" % self.msecs)
-
-# Parametrized PUT body for a single list element
-JSONPUT = """
-{
-  "service-function": [
-    {
-      "ip-mgmt-address": "20.0.0.11",
-      "type": "service-function:napt44",
-      "name": "%d"
-    }
-  ]
-}"""
-
-putheaders = {'content-type': 'application/json'}
-getheaders = {'Accept': 'application/json'}
-# ODL IP:port
-ODLIP   = "127.0.0.1:8080"
-# We first delete all existing service functions
-DELURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
-GETURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/service-function/%d/"
-# Incremental PUT. This URL is for a list element
-PUTURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/service-function/%d/"
-
-# You probably need to adjust this number based on your OS constraints.
-# Maximum number of incremental PUT list elements
-numputreq = 1000000
-# Maximum number of GET requests
-numgetreq = 10000
-# We will present PUT reports every numputstep PUTs
-numputstep = 1000
-# We will present GET reports every numgetstep GETs
-numgetstep = 1000
-
-# Incrementally PUT list elements up to numputreq
-def putperftest():
-    s = requests.Session()
-    print ("Starting PUT Performance. Total of %d requests\n" % numputreq)
-    for numreq in range(0, numputreq, numputstep): 
-        success = 0      
-        with Timer() as t:
-            for i in range(numreq, numreq + numputstep):
-                r = s.put((PUTURL % i),data = (JSONPUT % i), headers=putheaders, stream=False )
-                if (r.status_code == 200):
-                    success+=1
-        print ("=> %d elapsed requests" % (numreq + numputstep))
-        print ("=> %d requests/s in the last %d reqs" % ((numputstep)/t.secs, numputstep))
-        print ("=> %d successful PUT requests in the last %d reqs " % (success, numputstep))
-        print ("\n")
-
-# Delete all service functions
-def delallsf():
-    print ("Deleting all Service Functions")
-    r = requests.delete(DELURL, headers=getheaders)   
-    if (r.status_code == 200) or (r.status_code == 500):
-        print ("Deleted all Service Functions \n")
-        return 0
-    else:
-        print ("Delete Failed \n")
-        exit()
-        return -1
-
-# Retrieve list elements 
-def getperftest():
-    s = requests.Session()
-    print ("Starting GET Performance. Total of %d requests \n" % numgetreq)
-    for numreq in range(0, numgetreq, numgetstep): 
-        success = 0      
-        with Timer() as t:
-            for i in range(numreq, numreq + numgetstep):
-                r = s.get((GETURL % i), stream=False )
-                if (r.status_code == 200):
-                    success+=1
-        print ("=> %d elapsed requests" % (numreq + numgetstep))
-        print ("=> %d requests/s in the last %d reqs" % ((numgetstep)/t.secs, numgetstep))
-        print ("=> %d successful GET requests in the last %d reqs " % (success, numgetstep))
-        print ("\n")
-
-
-if __name__ == "__main__":
-    delallsf()
-    putperftest()
-    getperftest()
-
-
-
-
-
-
-
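
The removed restconf_incr_put.py applied the same timing pattern to a growing service-function list: PUT list elements one at a time and report the request rate after every step of numputstep requests. A minimal sketch of that loop is shown below; the endpoint matches the script's ODLIP default, while the element body, total count, and step size are illustrative assumptions.

    import time
    import requests

    # Assumed RESTCONF endpoint for individual service-function list elements.
    BASE_URL = 'http://127.0.0.1:8080'
    SF_URL = BASE_URL + '/restconf/config/service-function:service-functions/service-function/%d/'
    PUT_HEADERS = {'Content-Type': 'application/json'}

    # One list element per PUT, keyed by a numeric name, as in the deleted script.
    SF_BODY = '''{
      "service-function": [
        {
          "ip-mgmt-address": "20.0.0.11",
          "type": "service-function:napt44",
          "name": "%d"
        }
      ]
    }'''

    def put_step(session, first, count):
        """PUT `count` consecutive list elements; return (successes, seconds)."""
        ok = 0
        start = time.time()
        for i in range(first, first + count):
            r = session.put(SF_URL % i, data=SF_BODY % i, headers=PUT_HEADERS)
            if r.status_code in (200, 201):  # 201 when the element is newly created
                ok += 1
        return ok, time.time() - start

    if __name__ == '__main__':
        s = requests.Session()
        step = 1000
        for first in range(0, 5000, step):  # 5000 elements total, reported per step
            ok, secs = put_step(s, first, step)
            print('%d..%d: %.0f requests/s, %d OK'
                  % (first, first + step - 1, step / secs, ok))
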
diff --git a/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py b/test/tools/odl-mdsal-clustering-tests/clustering-performance-test/restconf_oneput_ngets.py
deleted file mode 100644 (file)
index 1a06118..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-__author__ = "Reinaldo Penno"
-__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
-__license__ = "New-style BSD"
-__version__ = "0.1"
-__email__ = "repenno@cisco.com"
-__status__ = "alpha"
-
-
-import requests
-import time
-
-class Timer(object):
-    def __init__(self, verbose=False):
-        self.verbose = verbose
-
-    def __enter__(self):
-        self.start = time.time()
-        return self
-
-    def __exit__(self, *args):
-        self.end = time.time()
-        self.secs = self.end - self.start
-        self.msecs = self.secs * 1000  # millisecs
-        if self.verbose:
-            print ("elapsed time: %f ms" % self.msecs)
-            
-# Parametrized PUT body: the whole list containing a single element.
-JSONPUT = """
-{
-  "service-functions": {
-    "service-function": [
-      {
-        "ip-mgmt-address": "20.0.0.10",
-        "type": "dpi",
-        "name": "%d"
-      }
-    ]
-  }
-}"""
-
-putheaders = {'content-type': 'application/json'}
-getheaders = {'Accept': 'application/json'}
-ODLIP   = "127.0.0.1:8080"
-DELURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
-GETURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
-PUTURL  = "http://" + ODLIP + "/restconf/config/service-function:service-functions/"
-
-# Total number of PUT requests (this script issues a single PUT of the
-# whole list)
-numputreq = 1
-# Total number of GET requests
-numgetreq = 100000
-# We will present PUT reports every numputstep PUTs
-numputstep = 1
-# We will present GET reports every numgetstep GETs
-numgetstep = 1000
-
-
-def getperftest():
-    s = requests.Session()
-    print ("Starting GET Performance. Total of %d requests \n" % numgetreq)
-    for numreq in range(0, numgetreq, numgetstep): 
-        success = 0      
-        with Timer() as t:
-            for i in range(numreq, numreq + numgetstep):
-                r = s.get(GETURL, stream=False )
-                if (r.status_code == 200):
-                    success+=1
-        print ("=> %d elapsed requests" % (numreq + numgetstep))
-        print ("=> %d requests/s in the last %d reqs" % ((numgetstep)/t.secs, numgetstep))
-        print ("=> %d successful GET requests in the last %d reqs " % (success, numgetstep))
-        print ("\n")
-
-# With the default parameters this performs a single PUT, always overwriting existing elements
-def putperftest():
-    s = requests.Session()
-    print ("Starting PUT Performance. Total of %d requests\n" % numputreq)
-    for numreq in range(0, numputreq, numputstep): 
-        success = 0      
-        with Timer() as t:
-            for i in range(numreq, numreq + numputstep):
-                r = s.put(PUTURL, data = (JSONPUT % i), headers=putheaders, stream=False )
-                if (r.status_code == 200):
-                    success+=1
-        print ("=> %d elapsed requests" % (numreq + numputstep))
-        print ("=> %d requests/s in the last %d reqs" % ((numputstep)/t.secs, numputstep))
-        print ("=> %d successful PUT requests in the last %d reqs " % (success, numputstep))
-        print ("\n")
-
-# Delete all service functions
-def delallsf():
-    print ("Deleting all Service Functions")
-    r = requests.delete(DELURL, headers=getheaders)   
-    if (r.status_code == 200) or (r.status_code == 500):
-        print ("Deleted all Service Functions \n")
-        return 0
-    else:
-        print ("Delete Failed \n")
-        exit()
-        return -1
-
-if __name__ == "__main__":
-    delallsf()
-    putperftest()
-    getperftest()
-
-
-
-
-
-
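
Finally, the removed restconf_oneput_ngets.py inverted the emphasis: it wrote the whole service-functions container once and then measured the sustained GET rate against it. The read-rate part reduces to a sketch along these lines; the endpoint mirrors the script's default, and the request count is an illustrative assumption.

    import time
    import requests

    # Assumed RESTCONF endpoint for the whole service-functions container.
    BASE_URL = 'http://127.0.0.1:8080'
    SF_CONTAINER_URL = BASE_URL + '/restconf/config/service-function:service-functions/'
    GET_HEADERS = {'Accept': 'application/json'}

    def get_rate(session, count):
        """Issue `count` GETs of the container; return (successes, requests/s)."""
        ok = 0
        start = time.time()
        for _ in range(count):
            r = session.get(SF_CONTAINER_URL, headers=GET_HEADERS)
            if r.status_code == 200:
                ok += 1
        elapsed = time.time() - start
        return ok, count / elapsed

    if __name__ == '__main__':
        s = requests.Session()
        ok, rate = get_rate(s, 1000)
        print('%d/1000 successful GETs, %.0f requests/s' % (ok, rate))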