# Module authorship metadata for the OpenDaylight dsbenchmark driver script.
# NOTE(review): every line in this file carries a leading number (11, 12, ...)
# that looks like a baked-in line number from an extraction/paste — it is not
# valid Python and should be stripped when restoring the file.
11 __author__ = "Jan Medved"
12 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
13 __license__ = "New-style BSD"
14 __email__ = "jmedved@cisco.com"
# ---------------------------------------------------------------------------
# Command-line interface: benchmark parameters for the dsbenchmark RESTCONF
# application running on an OpenDaylight controller. Only a sampled subset of
# the original argparse calls is visible here (source lines are
# non-contiguous), so several add_argument() headers are missing from view.
# ---------------------------------------------------------------------------
17 parser = argparse.ArgumentParser(
18 description="Datastore Benchmarking"
20 "See documentation @:"
21 "https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns" # noqa
# --host / --port: location of the controller's RESTCONF endpoint
# (default port 8181 is the standard ODL RESTCONF port).
29 help="the IP of the target host to initiate benchmark testing on.",
32 "--port", type=int, default=8181, help="the port number of target host."
# --txtype: which transaction styles to benchmark; default runs both.
38 choices=["TX-CHAINING", "SIMPLE-TX"],
40 default=["TX-CHAINING", "SIMPLE-TX"],
41 help="list of the transaction types to execute.",
# --total: total elements processed per run; split across outer/inner lists.
44 "--total", type=int, default=100000, help="total number of elements to process."
# --inner: inner-list sizes to sweep (outer size is derived as total/inner).
49 default=[1, 10, 100, 1000, 10000, 100000],
51 help="number of inner elements to process.",
# --ops: operations-per-transaction values to sweep.
56 default=[1, 10, 100, 1000, 10000, 100000],
58 help="number of operations per transaction.",
# --optype: datastore operations to benchmark.
62 choices=["PUT", "MERGE", "DELETE", "READ"],
64 default=["PUT", "MERGE", "DELETE", "READ"],
65 help="list of the types operations to execute.",
# --format: MD-SAL access API flavor.
69 choices=["BINDING-AWARE", "BINDING-INDEPENDENT"],
71 default=["BINDING-AWARE", "BINDING-INDEPENDENT"],
72 help="list of data formats to execute.",
# --datastore: which datastore(s) each test run targets.
76 choices=["CONFIG", "OPERATIONAL", "BOTH"],
78 default=["OPERATIONAL", "CONFIG"],
79 help="data-store type (config/operational) to use",
81 # There is also "listeners" parameter specified in the Yang file now.
# --warmup / --runs: JVM warm-up iterations vs. measured iterations.
86 help="number of warmup runs before official test runs",
92 help="number of official test runs. Note: Reported results are based on these runs.",
# --plot: regex/keyword filter selecting which results go into the csv files.
98 help="keywords filter for results to be drawn in plot (special keywords: all, none).",
# --units: time unit reported by the controller feature.
# NOTE(review): "miliseconds" is misspelled, but it is a runtime choice
# string — fixing it would break existing invocations; coordinate with users.
102 choices=["miliseconds", "microseconds"],
103 default="microseconds",
104 help="units of test duration values provided by dsbenchmark controller feature",
# Output csv file for the per-structure results (PLOT1).
# NOTE(review): this help text (and the one below) was copy-pasted from
# --units; it should describe the output file name instead.
108 dest="outfilestruct",
109 default="perf_per_struct.csv",
110 help="units of test duration values provided by dsbenchmark controller feature",
# Output csv file for the per-ops results (PLOT2).
115 default="perf_per_ops.csv",
116 help="units of test duration values provided by dsbenchmark controller feature",
118 args = parser.parse_args()
# Base RESTCONF URL used by all requests below.
121 BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
124 def send_clear_request():
126 Sends a clear request to the dsbenchmark app. A clear will clear the test-exec data store
127 and clear the 'test-executing' flag.
# Invoke the cleanup-store RPC with an empty POST body; HTTP Basic auth uses
# the default ODL admin/admin credentials. (The lines that consume the
# response `r` are not visible in this extract — presumably a status print.)
130 url = BASE_URL + "operations/dsbenchmark:cleanup-store"
132 r = requests.post(url, stream=False, auth=("admin", "admin"))
136 def send_test_request(
137 tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
140 Sends a request to the dsbenchmark app to start a data store benchmark test run.
141 The dsbenchmark app will perform the requested benchmark test and return measured
143 :param operation: PUT, MERGE, DELETE or READ
144 :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
145 :param datastore: OPERATIONAL, CONFIG or BOTH
146 :param outer_elem: Number of elements in the outer list
147 :param inner_elem: Number of elements in the inner list
148 :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
# Build the start-test RPC payload. The full JSON template (lines 157-164)
# is not visible in this extract; it interpolates the parameters below.
151 url = BASE_URL + "operations/dsbenchmark:start-test"
152 postheaders = {"content-type": "application/json", "Accept": "application/json"}
154 test_request_template = """{
156 "transaction-type": "%s",
165 data = test_request_template % (
# POST the RPC synchronously and merge the RPC "output" object into the
# result dict on success; otherwise report the HTTP error.
175 url, data, headers=postheaders, stream=False, auth=("admin", "admin")
177 result = {u"http-status": r.status_code}
178 if r.status_code == 200:
# NOTE(review): dict(a.items() + b.items()) is Python 2 only — in Python 3
# dict_items does not support '+'. Port as:
# result = {**result, **json.loads(r.content)["output"]}
179 result = dict(result.items() + json.loads(r.content)["output"].items())
181 print("Error %s, %s" % (r.status_code, r.content))
185 def print_results(run_type, idx, res):
187 Prints results from a dsbenchmakr test run to console
188 :param run_type: String parameter that can be used to identify the type of the
189 test run (e.g. WARMUP or TEST)
190 :param idx: Index of the test run
191 :param res: Parsed json (dictionary) that was returned from a dsbenchmark
# One-line summary per run. The remaining format arguments (status,
# testExecTime, txOk, txError — original lines 197-202) are not visible in
# this extract.
196 "%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d"
201 res[u"listBuildTime"],
# (The `def run_test(...)` header is outside this extract; the fragment below
# is its docstring and body.)
221 Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
222 the specified number of dsbenchmark test runs and computes the average time
223 for building the test data (a list of lists) and the average time for the
224 execution of the test.
225 :param warmup_runs: # of warmup runs
226 :param test_runs: # of test runs
227 :param operation: PUT, MERGE or DELETE
228 :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
229 :param datastore: OPERATIONAL, CONFIG or BOTH
230 :param outer_elem: Number of elements in the outer list
231 :param inner_elem: Number of elements in the inner list
232 :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
233 :return: average build time AND average test execution time
# Accumulators for the measured (non-warmup) runs only.
235 total_build_time = 0.0
236 total_exec_time = 0.0
# Warmup runs: results are printed but deliberately not accumulated.
256 for idx in range(warmup_runs):
257 res = send_test_request(
258 tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
260 print_results("WARMUP", idx, res)
# Measured runs: accumulate list-build and execution times from each result.
262 for idx in range(test_runs):
263 res = send_test_request(
264 tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
266 print_results("TEST", idx, res)
267 total_build_time += res["listBuildTime"]
268 total_exec_time += res["execTime"]
# NOTE(review): raises ZeroDivisionError if test_runs == 0 — presumably the
# CLI default prevents that; verify against the --runs argument.
270 return total_build_time / test_runs, total_exec_time / test_runs
# (The enclosing `def store_result(...)` header and the code that builds
# `plot_key` — original lines ~296-314 — are outside this extract.)
286 Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
287 :param values: The list (dictionary) to be used for storing the result
288 :param operation: PUT, MERGE or DELETE
289 :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
290 :param datastore: OPERATIONAL, CONFIG or BOTH
291 :param outer_elem: Number of elements in the outer list
292 :param inner_elem: Number of elements in the inner list
293 :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
294 :param value_name: Value name (name for the measured value)
295 :param value: The (measured) value
# Record the measurement under the composite plot key (built above, not
# visible here); later filtered/written by write_results_to_file().
315 values[plot_key] = value
318 def write_results_to_file(values, file_name, key_filter):
320 Writes specified results (values) into the file (file_name). Results are filtered according to key_filter value.
321 Produces a csv file consumable by Jenkins integration environment.
322 :param file_name: Name of the (csv) file to be created
323 :param values: The list (dictionary) to be written into the file
324 :param key_filter: A regexp string to filter the results to be finally put into the file
# NOTE(review): file opened without a context manager; the matching close()
# (original lines 341+) is not visible here — prefer `with open(...) as f:`.
329 f = open(file_name, "wt")
# Build one header line (keys) and one data line (values), keys sorted for a
# stable column order. (Initialisation of first_line/second_line is on lines
# not visible in this extract.)
331 for key in sorted(values):
# NOTE(review): `&` and `|` are bitwise operators — they work on bools but do
# not short-circuit, so re.search() is still evaluated when key_filter is
# "none"/"all"; `and`/`or` would be the idiomatic (and safer) choice.
332 if (key_filter != "none") & (
333 (key_filter == "all") | (re.search(key_filter, key) is not None)
335 first_line += key + ","
336 second_line += str(values[key]) + ","
# Drop the trailing comma from both lines before writing.
337 first_line = first_line[:-1]
338 second_line = second_line[:-1]
339 f.write(first_line + "\n")
340 f.write(second_line + "\n")
345 if __name__ == "__main__":
# Bind parsed CLI arguments to module-level benchmark parameters.
347 TX_TYPES = args.txtype
348 TOTAL_ELEMENTS = args.total
349 INNER_ELEMENTS = args.inner
350 OPS_PER_TX = args.ops
351 OPERATIONS = args.optype
352 DATA_FORMATS = args.format
353 DATASTORES = args.datastore
354 PLOT_FILTER = args.plot
# TIME_DIV (set on lines not visible here) converts the controller-reported
# durations according to --units.
355 if args.units == "miliseconds":
360 # Dictionaries for storing keys & values to plot
365 WARMUP_RUNS = args.warmup
366 TEST_RUNS = args.runs
368 # Clean up any data that may be present in the data store
371 # Run the benchmark tests and collect data in a csv file for import into a graphing software
# NOTE(review): opened without a context manager and no close() is visible —
# prefer `with open("test.csv", "wt") as f:`.
372 f = open("test.csv", "wt")
374 start_time = time.time()
375 print("Start time: %f " % (start_time))
377 writer = csv.writer(f)
379 # Determine the impact of transaction type, data format and data structure on performance.
380 # Iterate over all transaction types, data formats, operation types, and different
381 # list-of-lists layouts; always use a single operation in each transaction
382 print("\n#######################################")
383 print("Tx type, data format & data structure")
384 print("#######################################")
385 for tx_type in TX_TYPES:
386 print("***************************************")
387 print("Transaction Type: %s" % tx_type)
388 print("***************************************")
389 writer.writerow((("%s:" % tx_type), "", ""))
391 for fmt in DATA_FORMATS:
392 print("---------------------------------------")
393 print("Data format: %s" % fmt)
394 print("---------------------------------------")
395 writer.writerow(("", ("%s:" % fmt), ""))
397 for datastore in DATASTORES:
399 print("Data store: %s" % datastore)
402 for oper in OPERATIONS:
403 print("Operation: %s" % oper)
404 writer.writerow(("", "", "%s:" % oper))
# Sweep inner-list sizes; outer size derived so outer*inner == TOTAL_ELEMENTS.
406 for elem in INNER_ELEMENTS:
407 avg_build_time, avg_exec_time = run_test(
# NOTE(review): TOTAL_ELEMENTS / elem is Python 2 integer division; under
# Python 3 this produces a float — use // when porting (applies to every
# occurrence below as well).
414 TOTAL_ELEMENTS / elem,
418 e_label = "%d/%d" % (TOTAL_ELEMENTS / elem, elem)
# Store total, build and exec times into PLOT1 (per-structure results);
# the store_result(...) call headers are on lines not visible here.
427 (avg_build_time + avg_exec_time),
436 TOTAL_ELEMENTS / elem,
440 avg_build_time / TIME_DIV,
448 TOTAL_ELEMENTS / elem,
452 avg_exec_time / TIME_DIV,
455 # Determine the impact of number of writes per transaction on performance.
456 # Iterate over all transaction types, data formats, operation types, and
457 # operations-per-transaction; always use a list of lists where the inner list has one parameter
458 print("\n#######################################")
460 print("#######################################")
461 for tx_type in TX_TYPES:
462 print("***************************************")
463 print("Transaction Type: %s" % tx_type)
464 print("***************************************")
465 writer.writerow((("%s:" % tx_type), "", ""))
467 for fmt in DATA_FORMATS:
468 print("---------------------------------------")
469 print("Data format: %s" % fmt)
470 print("---------------------------------------")
471 writer.writerow(("", ("%s:" % fmt), ""))
473 for datastore in DATASTORES:
475 print("Data store: %s" % datastore)
478 for oper in OPERATIONS:
479 print("Operation: %s" % oper)
480 writer.writerow(("", "", "%s:" % oper))
# Sweep operations-per-transaction values.
482 for wtx in OPS_PER_TX:
483 avg_build_time, avg_exec_time = run_test(
# Store results into PLOT2 (per-ops results).
502 (avg_build_time + avg_exec_time),
# NOTE(review): `elem` here is the stale loop variable left over from the
# first sweep (its final INNER_ELEMENTS value), not something derived from
# `wtx` — looks like a copy-paste bug; confirm against the full source.
511 TOTAL_ELEMENTS / elem,
515 avg_build_time / TIME_DIV,
523 TOTAL_ELEMENTS / elem,
527 avg_exec_time / TIME_DIV,
# Dump both filtered result dictionaries into their csv files.
530 write_results_to_file(PLOT1, args.outfilestruct, PLOT_FILTER)
531 write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)
533 end_time = time.time()
534 print("End time: %f " % (end_time))
535 print("Total execution time: %f" % ((end_time - start_time)))