##############################################################################
# Copyright (c) 2015 Cisco Systems All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
##############################################################################
# Module metadata: attribution for the upstream OpenDaylight benchmark script.
__author__ = "Jan Medved"
__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
__license__ = "Eclipse Public License v1.0"
__email__ = "jmedved@cisco.com"
def send_test_request(producer_type, producers, listeners, payload_size, iterations):
    """
    Sends a request to the ntfbenchmark app to start a notification benchmark
    test run. The ntfbenchmark app will perform the requested benchmark test
    and return the measured test execution time and notification throughput.

    :param producer_type: producer type, 'DROPPING' or 'BLOCKING'
    :param producers: number of notification producers to start
    :param listeners: number of notification listeners to start
    :param payload_size: payload size for the test notifications
    :param iterations: number of iterations to run
    :return: Result from the test request REST call (dict with 'http-status'
             plus, on success, the fields of the RPC's 'output' node)
    """
    url = BASE_URL + "operations/ntfbenchmark:start-test"
    postheaders = {"content-type": "application/json", "Accept": "application/json"}

    # JSON body for the ntfbenchmark:start-test RPC input.
    test_request_template = """{
        "input": {
            "producer-type": "%s",
            "producers": "%s",
            "listeners": "%s",
            "payload-size": "%s",
            "iterations": "%s"
        }
    }"""
    data = test_request_template % (
        producer_type,
        producers,
        listeners,
        payload_size,
        iterations,
    )
    r = requests.post(
        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
    )
    result = {"http-status": r.status_code}
    if r.status_code == 200:
        # Merge the RPC output into the result. The original used
        # dict(result.items() + ...), which is Python 2 only (dict views
        # do not support '+' in Python 3); update() works in both.
        result.update(json.loads(r.content)["output"])
    else:
        print("Error %s, %s" % (r.status_code, r.content))
    return result
def print_results(run_type, idx, res):
    """
    Prints results from a single benchmark test run to the console.

    :param run_type: String parameter that can be used to identify the type
                     of the test run (e.g. WARM-UP or TEST)
    :param idx: Index of the test run
    :param res: Parsed json (dictionary) that was returned from a benchmark
                test run
    :return: None
    """
    print(
        "%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d"
        % (
            run_type,
            idx,
            res["producer-ok"],
            res["producer-error"],
            res["listener-ok"],
            res["producer-rate"],
            res["listener-rate"],
            res["producer-elapsed-time"],
            res["listener-elapsed-time"],
        )
    )
def run_test(
    warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations
):
    """
    Execute a benchmark test. Performs the JVM 'warmup' before the test,
    then runs the specified number of measured test runs and computes the
    average execution time and the average producer/listener throughput.

    :param warmup_runs: # of warmup runs (results printed but not measured)
    :param test_runs: # of measured test runs
    :param producer_type: DROPPING or BLOCKING
    :param producers: number of notification producers to start
    :param listeners: number of notification listeners to start
    :param payload_size: payload size for the test notifications
    :param iterations: number of requests each producer issues
    :return: (average execution time, average producer rate,
              average listener rate) over the measured runs
    """
    total_exec_time = 0.0
    total_prate = 0.0
    total_lrate = 0.0

    # Warm up the JVM; these runs are reported but excluded from averages.
    for idx in range(warmup_runs):
        res = send_test_request(
            producer_type, producers, listeners, payload_size, iterations
        )
        print_results("WARM-UP", idx, res)

    # Measured runs: accumulate the metrics reported by each test.
    for idx in range(test_runs):
        res = send_test_request(
            producer_type, producers, listeners, payload_size, iterations
        )
        print_results("TEST", idx, res)
        total_exec_time += res["listener-elapsed-time"]
        total_prate += res["producer-rate"]
        total_lrate += res["listener-rate"]

    # NOTE(review): test_runs must be > 0 or this raises ZeroDivisionError.
    return total_exec_time / test_runs, total_prate / test_runs, total_lrate / test_runs
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="RPC Benchmarking")

    # Host configuration
    parser.add_argument(
        "--host",
        default="localhost",
        help="IP of the target host where benchmarks will be run.",
    )
    parser.add_argument(
        "--port", type=int, default=8181, help="The port number of target host."
    )

    # Test parameters
    parser.add_argument(
        "--ptype",
        choices=["DROPPING", "BLOCKING"],
        nargs="?",
        default="BLOCKING",
        help="Producer type. (default: BLOCKING)",
    )
    parser.add_argument(
        "--warm",
        type=int,
        default=10,
        help="The number of warm-up runs before the measured test runs. (Default 10)",
    )
    parser.add_argument(
        "--run",
        type=int,
        default=10,
        help="The number of measured test runs. Reported results are based on the"
        " average of all measured runs. (Default 10)",
    )
    parser.add_argument(
        "--producers",
        type=int,
        nargs="+",
        default=[1, 2, 4, 8, 16, 32],
        help="The number of test producers to start."
        " (Default [1, 2, 4, 8, 16, 32])",
    )
    parser.add_argument(
        "--listeners",
        type=int,
        nargs="+",
        default=[1, 2, 4, 8, 16, 32],
        help="The number of test listeners to start."
        " (Default [1, 2, 4, 8, 16, 32])",
    )
    parser.add_argument(
        "--iterations",
        type=int,
        default=10,
        help="The number of requests that each producer issues "
        "during the test run. (Default 10)",
    )
    parser.add_argument(
        "--payload",
        type=int,
        default=10,
        help="Payload size for the RPC - number of elements in a "
        "simple integer list. (Default 10)",
    )

    args = parser.parse_args()
    BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)

    # Run the benchmark tests and collect data in a csv file for import into
    # a graphing software. 'with' guarantees the file is closed even when a
    # test run raises; newline='' is the documented way to open files handed
    # to csv.writer (avoids blank lines on Windows).
    with open("test.csv", "wt", newline="") as f:
        writer = csv.writer(f)

        # One matrix row per producer count, one column per listener count.
        lrate_matrix = []
        prate_matrix = []
        for prod in args.producers:
            lrate_row = []
            prate_row = []
            for lis in args.listeners:
                exec_time, prate, lrate = run_test(
                    args.warm,
                    args.run,
                    args.ptype,
                    prod,
                    lis,
                    args.payload,
                    args.iterations,
                )
                print(
                    "Producers: %d, Listeners: %d, prate: %d, lrate: %d"
                    % (prod, lis, prate, lrate)
                )
                lrate_row.append(lrate)
                prate_row.append(prate)
            lrate_matrix.append(lrate_row)
            prate_matrix.append(prate_row)

        writer.writerow(("Listener Rates:", ""))
        writer.writerows(lrate_matrix)
        writer.writerow(("Producer Rates:", ""))
        writer.writerows(prate_matrix)