##############################################################################
# Copyright (c) 2015 Cisco Systems All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
##############################################################################
# Module metadata for the RPC benchmark driver script.
__author__ = "Jan Medved"
__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
__license__ = "Eclipse Public License v1.0"
__email__ = "jmedved@cisco.com"
23 def send_test_request(operation, clients, servers, payload_size, iterations):
25 Sends a request to the rpcbenchmark app to start a data store benchmark test run.
26 The rpcbenchmark app will perform the requested benchmark test and return measured
27 test execution time and RPC throughput
29 :param operation: operation type
30 :param clients: number of simulated RPC clients
31 :param servers: Number of simulated RPC servers if operation type is ROUTED-***
32 :param payload_size: Payload size for the test RPCs
33 :param iterations: Number of iterations to run
34 :return: Result from the test request REST call (json)
36 url = BASE_URL + "operations/rpcbenchmark:start-test"
37 postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
39 test_request_template = '''{
48 data = test_request_template % (operation, clients, servers, payload_size, iterations)
49 r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
50 result = {u'http-status': r.status_code}
51 if r.status_code == 200:
52 result = dict(result.items() + json.loads(r.content)['output'].items())
54 print 'Error %s, %s' % (r.status_code, r.content)
58 def print_results(run_type, idx, res):
60 Prints results from a dsbenchmakr test run to console
61 :param run_type: String parameter that can be used to identify the type of the
62 test run (e.g. WARMUP or TEST)
63 :param idx: Index of the test run
64 :param res: Parsed json (disctionary) that was returned from a dsbenchmark
68 print '%s #%d: Ok: %d, Error: %d, Rate: %d, Exec time: %d' % \
70 res[u'global-rtc-client-ok'], res[u'global-rtc-client-error'], res[u'rate'], res[u'exec-time'])
def run_test(warmup_runs, test_runs, operation, clients, servers, payload_size, iterations):
    """
    Execute a benchmark test. Performs the JVM 'warmup' before the test, then
    runs the specified number of measured test runs and computes the average
    execution time and average RPC rate over the measured runs.

    :param warmup_runs: # of warmup runs (results are printed but not averaged)
    :param test_runs: # of measured test runs
    :param operation: GLOBAL-RTC or ROUTED-RTC
    :param clients: number of simulated RPC clients
    :param servers: number of simulated RPC servers (routed RPC test only)
    :param payload_size: payload size for the test RPCs
    :param iterations: number of iterations in each test run
    :return: average test execution time AND average RPC rate
    """
    total_exec_time = 0.0
    total_rate = 0.0

    # Warmup runs: let the JVM JIT-compile the hot paths; results are discarded
    for idx in range(warmup_runs):
        res = send_test_request(operation, clients, servers, payload_size, iterations)
        print_results('WARM-UP', idx, res)

    # Measured runs: accumulate execution time and rate for averaging
    for idx in range(test_runs):
        res = send_test_request(operation, clients, servers, payload_size, iterations)
        print_results('TEST', idx, res)
        total_exec_time += res['exec-time']
        total_rate += res['rate']

    return total_exec_time / test_runs, total_rate / test_runs
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='RPC Benchmarking')

    # Target host where the rpcbenchmark RESTCONF service is running
    parser.add_argument("--host", default="localhost", help="IP of the target host where benchmarks will be run.")
    parser.add_argument("--port", type=int, default=8181, help="The port number of target host.")

    # Test parameters
    parser.add_argument("--operation", choices=["GLOBAL-RTC", "ROUTED-RTC"], default='GLOBAL-RTC',
                        help='RPC and client type. RPC can be global or routed; client is run-to-completion (RTC). '
                             '(default: GLOBAL-RTC - Global RPC, Run-to-completion client)')
    parser.add_argument("--warm", type=int, default=10,
                        help='The number of warm-up runs before the measured test runs. (Default 10)')
    parser.add_argument("--run", type=int, default=10,
                        help='The number of measured test runs. Reported results are based on the average of all'
                             " measured runs. (Default 10)")
    parser.add_argument("--clients", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32, 64],
                        help='The numbers of test RPC clients to start, one test sweep per value. '
                             '(Default 1 2 4 8 16 32 64)')
    parser.add_argument("--servers", type=int, nargs='+', default=[1, 2, 4, 8, 16, 32, 64],
                        help='The numbers of routed RPC servers to start in the routed RPC test. Ignored in the '
                             'global RPC test. (Default 1 2 4 8 16 32 64)')
    parser.add_argument("--iterations", type=int, default=10, help='The number of requests that each RPC client '
                                                                   'issues during the test run. (Default 10)')
    parser.add_argument("--payload", type=int, default=10, help='Payload size for the RPC - number of elements in a '
                                                                'simple integer list. (Default 10)')

    args = parser.parse_args()
    BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)

    # A global RPC has no routed server sweep - run the client sweep once
    if args.operation == 'GLOBAL-RTC':
        servers = [1]
    else:
        servers = args.servers

    # Run the benchmark tests and collect data in a csv file for import into a graphing software
    f = open('test.csv', 'wt')
    try:
        writer = csv.writer(f)
        rate_matrix = []

        # One row of rates per server count; one column per client count
        for svr in servers:
            rate_row = []
            for client in args.clients:
                exec_time, rate = \
                    run_test(args.warm, args.run, args.operation, client, svr, args.payload, args.iterations)
                rate_row.append(rate)
            rate_matrix.append(rate_row)

        writer.writerow(('RPC Rates:', ''))
        writer.writerows(rate_matrix)
    finally:
        # Ensure the csv file is closed even if a test run fails
        f.close()