# Auto-generated patch by python-black
# [integration/test.git] / tools / mdsal_benchmark / dsbenchmark.py
1 #!/usr/bin/python
2
3 import argparse
4 import requests
5 import json
6 import csv
7 import time
8 import re
9
10
# Module authorship metadata.
__author__ = "Jan Medved"
__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
__license__ = "New-style BSD"
__email__ = "jmedved@cisco.com"
15
16
# Command-line interface. Defaults mirror the dsbenchmark app's Yang model;
# the multi-valued parameters (nargs="+") define the combinations iterated
# over by the benchmark loops in __main__.
parser = argparse.ArgumentParser(
    description="Datastore Benchmarking"
    ""
    "See documentation @:"
    "https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns"  # noqa
    ""
)

# Host Config
parser.add_argument(
    "--host",
    default="localhost",
    help="the IP of the target host to initiate benchmark testing on.",
)
parser.add_argument(
    "--port", type=int, default=8181, help="the port number of target host."
)

# Test Parameters
parser.add_argument(
    "--txtype",
    choices=["TX-CHAINING", "SIMPLE-TX"],
    nargs="+",
    default=["TX-CHAINING", "SIMPLE-TX"],
    help="list of the transaction types to execute.",
)
parser.add_argument(
    "--total", type=int, default=100000, help="total number of elements to process."
)
parser.add_argument(
    "--inner",
    type=int,
    default=[1, 10, 100, 1000, 10000, 100000],
    nargs="+",
    help="number of inner elements to process.",
)
parser.add_argument(
    "--ops",
    type=int,
    default=[1, 10, 100, 1000, 10000, 100000],
    nargs="+",
    help="number of operations per transaction.",
)
parser.add_argument(
    "--optype",
    choices=["PUT", "MERGE", "DELETE", "READ"],
    nargs="+",
    default=["PUT", "MERGE", "DELETE", "READ"],
    help="list of the types operations to execute.",
)
parser.add_argument(
    "--format",
    choices=["BINDING-AWARE", "BINDING-INDEPENDENT"],
    nargs="+",
    default=["BINDING-AWARE", "BINDING-INDEPENDENT"],
    help="list of data formats to execute.",
)
parser.add_argument(
    "--datastore",
    choices=["CONFIG", "OPERATIONAL", "BOTH"],
    nargs="+",
    default=["OPERATIONAL", "CONFIG"],
    help="data-store type (config/operational) to use",
)
# There is also "listeners" parameter specified in the Yang file now.
parser.add_argument(
    "--warmup",
    type=int,
    default=10,
    help="number of warmup runs before official test runs",
)
parser.add_argument(
    "--runs",
    type=int,
    default=10,
    help="number of official test runs. Note: Reported results are based on these runs.",
)
parser.add_argument(
    "--plot",
    type=str,
    default="none",
    help="keywords filter for results to be drawn in plot (special keywords: all, none).",
)
parser.add_argument(
    "--units",
    choices=["miliseconds", "microseconds"],
    default="microseconds",
    help="units of test duration values provided by dsbenchmark controller feature",
)
# BUG FIX: the help strings for the two output-file options below were
# copy-pasted from --units; they now describe what each option actually does.
parser.add_argument(
    "--outfile-struct",
    dest="outfilestruct",
    default="perf_per_struct.csv",
    help="name of the csv output file for the data-structure benchmark results",
)
parser.add_argument(
    "--outfile-ops",
    dest="outfileops",
    default="perf_per_ops.csv",
    help="name of the csv output file for the ops-per-transaction benchmark results",
)
args = parser.parse_args()


# Common URL prefix for all RESTCONF requests to the target controller.
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
122
123
def send_clear_request():
    """
    Sends a clear request to the dsbenchmark app. A clear will clear the test-exec data store
    and clear the 'test-executing' flag.
    :return: None
    """
    clear_url = BASE_URL + "operations/dsbenchmark:cleanup-store"
    response = requests.post(clear_url, stream=False, auth=("admin", "admin"))
    print(response.status_code)
134
135
def send_test_request(
    tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
):
    """
    Sends a request to the dsbenchmark app to start a data store benchmark test run.
    The dsbenchmark app will perform the requested benchmark test and return measured
    transaction times
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE, DELETE or READ
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: dict holding the RPC's http status; on success, merged with the
             RPC's "output" fields (status, listBuildTime, execTime, ...)
    """
    url = BASE_URL + "operations/dsbenchmark:start-test"
    postheaders = {"content-type": "application/json", "Accept": "application/json"}

    test_request_template = """{
        "input": {
            "transaction-type": "%s",
            "operation": "%s",
            "data-format": "%s",
            "data-store": "%s",
            "outerElements": %d,
            "innerElements": %d,
            "putsPerTx": %d
        }
    }"""
    data = test_request_template % (
        tx_type,
        operation,
        data_fmt,
        datastore,
        outer_elem,
        inner_elem,
        ops_per_tx,
    )
    r = requests.post(
        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
    )
    result = {u"http-status": r.status_code}
    if r.status_code == 200:
        # BUG FIX: dict(result.items() + other.items()) raises TypeError on
        # Python 3, where dict views do not support '+'; merge via update().
        result.update(json.loads(r.content)["output"])
    else:
        print("Error %s, %s" % (r.status_code, r.content))
    return result
183
184
def print_results(run_type, idx, res):
    """
    Prints results from a dsbenchmakr test run to console
    :param run_type: String parameter that can be used to identify the type of the
                     test run (e.g. WARMUP or TEST)
    :param idx: Index of the test run
    :param res: Parsed json (disctionary) that was returned from a dsbenchmark
                test run
    :return: None
    """
    summary = (
        "%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d"
        % (
            run_type,
            idx,
            res["status"],
            res["listBuildTime"],
            res["execTime"],
            res["txOk"],
            res["txError"],
        )
    )
    print(summary)
207
208
def run_test(
    warmup_runs,
    test_runs,
    tx_type,
    operation,
    data_fmt,
    datastore,
    outer_elem,
    inner_elem,
    ops_per_tx,
):
    """
    Execute a benchmark test. Performs the JVM 'wamrup' before the test, runs
    the specified number of dsbenchmark test runs and computes the average time
    for building the test data (a list of lists) and the average time for the
    execution of the test.
    :param warmup_runs: # of warmup runs
    :param test_runs: # of test runs
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: average build time AND average test execution time
    """
    build_time_sum = 0.0
    exec_time_sum = 0.0

    print(
        "Tx Type:",
        tx_type,
        "Operation:",
        operation,
        "Data Format:",
        data_fmt,
        "Datastore:",
        datastore,
    )
    print(
        "Outer Elements:",
        outer_elem,
        "Inner Elements:",
        inner_elem,
        "PutsPerTx:",
        ops_per_tx,
    )

    # Warmup runs: execute but do not include in the averages.
    for run in range(warmup_runs):
        res = send_test_request(
            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
        )
        print_results("WARMUP", run, res)

    # Official runs: accumulate the measured times.
    for run in range(test_runs):
        res = send_test_request(
            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
        )
        print_results("TEST", run, res)
        build_time_sum += res["listBuildTime"]
        exec_time_sum += res["execTime"]

    return build_time_sum / test_runs, exec_time_sum / test_runs
271
272
def store_result(
    values,
    tx_type,
    operation,
    data_fmt,
    datastore,
    outer_elem,
    inner_elem,
    ops_per_tx,
    value_name,
    value,
):
    """
    Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
    :param values: The list (dictionary) to be used for storing the result
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :param value_name: Value name (name for the measured value)
    :param value: The (measured) value
    :return: none
    """
    # Build the composite plot key in one formatting pass; %s stringifies the
    # numeric fields exactly like the str() calls it replaces.
    plot_key = "%s-%s-%s-%s-%s/%sOUTER/INNER-%sOP-%s" % (
        datastore,
        data_fmt,
        tx_type,
        operation,
        outer_elem,
        inner_elem,
        ops_per_tx,
        value_name,
    )
    values[plot_key] = value
316
317
def write_results_to_file(values, file_name, key_filter):
    """
    Writes specified results (values) into the file (file_name). Results are filtered according to key_filter value.
    Produces a csv file consumable by Jenkins integration environment.
    :param file_name: Name of the (csv) file to be created
    :param values: The list (dictionary) to be written into the file
    :param key_filter: A regexp string to filter the results to be finally put into the file
                       (special values: "all" keeps everything, "none" keeps nothing)
    :return: none
    """
    # Collect matching keys/values first, then join once -- replaces the
    # original append-then-strip-trailing-comma string building.
    keys = []
    vals = []
    for key in sorted(values):
        # Use short-circuit 'and'/'or' instead of the original bitwise '&'/'|'
        # so the regexp search is skipped when the filter is "all" or "none".
        if key_filter != "none" and (
            key_filter == "all" or re.search(key_filter, key) is not None
        ):
            keys.append(key)
            vals.append(str(values[key]))
    # 'with' guarantees the file is closed even if a write raises.
    with open(file_name, "wt") as f:
        f.write(",".join(keys) + "\n")
        f.write(",".join(vals) + "\n")
343
344
if __name__ == "__main__":
    # Test Parameters
    TX_TYPES = args.txtype
    TOTAL_ELEMENTS = args.total
    INNER_ELEMENTS = args.inner
    OPS_PER_TX = args.ops
    OPERATIONS = args.optype
    DATA_FORMATS = args.format
    DATASTORES = args.datastore
    PLOT_FILTER = args.plot
    # Divisor used to normalize the controller-reported durations for plotting.
    if args.units == "miliseconds":
        TIME_DIV = 1
    else:
        TIME_DIV = 1000

    # Dictionaries for storing keys & values to plot
    PLOT1 = {}
    PLOT2 = {}

    # Iterations
    WARMUP_RUNS = args.warmup
    TEST_RUNS = args.runs

    # Clean up any data that may be present in the data store
    send_clear_request()

    # Run the benchmark tests and collect data in a csv file for import into a
    # graphing software. 'with' replaces the original try/finally and closes
    # the file even if a benchmark run raises.
    with open("test.csv", "wt") as f:
        start_time = time.time()
        print("Start time: %f " % (start_time))

        writer = csv.writer(f)

        # Determine the impact of transaction type, data format and data structure on performance.
        # Iterate over all transaction types, data formats, operation types, and different
        # list-of-lists layouts; always use a single operation in each transaction
        print("\n#######################################")
        print("Tx type, data format & data structure")
        print("#######################################")
        for tx_type in TX_TYPES:
            print("***************************************")
            print("Transaction Type: %s" % tx_type)
            print("***************************************")
            writer.writerow((("%s:" % tx_type), "", ""))

            for fmt in DATA_FORMATS:
                print("---------------------------------------")
                print("Data format: %s" % fmt)
                print("---------------------------------------")
                writer.writerow(("", ("%s:" % fmt), ""))

                for datastore in DATASTORES:
                    # BUG FIX: the original bare 'print' statements here were
                    # silent no-ops on Python 3; print blank separator lines.
                    print()
                    print("Data store: %s" % datastore)
                    print()

                    for oper in OPERATIONS:
                        print("Operation: %s" % oper)
                        writer.writerow(("", "", "%s:" % oper))

                        for elem in INNER_ELEMENTS:
                            # Keep outer * inner == TOTAL_ELEMENTS.
                            # BUG FIX: use floor division so the element count
                            # stays an int on Python 3 ('/' yields a float).
                            outer_elem = TOTAL_ELEMENTS // elem
                            avg_build_time, avg_exec_time = run_test(
                                WARMUP_RUNS,
                                TEST_RUNS,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                outer_elem,
                                elem,
                                1,
                            )
                            e_label = "%d/%d" % (outer_elem, elem)
                            writer.writerow(
                                (
                                    "",
                                    "",
                                    "",
                                    e_label,
                                    avg_build_time,
                                    avg_exec_time,
                                    (avg_build_time + avg_exec_time),
                                )
                            )
                            store_result(
                                PLOT1,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                outer_elem,
                                elem,
                                1,
                                "BUILD",
                                avg_build_time / TIME_DIV,
                            )
                            store_result(
                                PLOT1,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                outer_elem,
                                elem,
                                1,
                                "EXEC",
                                avg_exec_time / TIME_DIV,
                            )

        # Determine the impact of number of writes per transaction on performance.
        # Iterate over all transaction types, data formats, operation types, and
        # operations-per-transaction; always use a list of lists where the inner list has one parameter
        print("\n#######################################")
        print("Puts per tx")
        print("#######################################")
        for tx_type in TX_TYPES:
            print("***************************************")
            print("Transaction Type: %s" % tx_type)
            print("***************************************")
            writer.writerow((("%s:" % tx_type), "", ""))

            for fmt in DATA_FORMATS:
                print("---------------------------------------")
                print("Data format: %s" % fmt)
                print("---------------------------------------")
                writer.writerow(("", ("%s:" % fmt), ""))

                for datastore in DATASTORES:
                    print()
                    print("Data store: %s" % datastore)
                    print()

                    for oper in OPERATIONS:
                        print("Operation: %s" % oper)
                        writer.writerow(("", "", "%s:" % oper))

                        for wtx in OPS_PER_TX:
                            avg_build_time, avg_exec_time = run_test(
                                WARMUP_RUNS,
                                TEST_RUNS,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                TOTAL_ELEMENTS,
                                1,
                                wtx,
                            )
                            writer.writerow(
                                (
                                    "",
                                    "",
                                    "",
                                    wtx,
                                    avg_build_time,
                                    avg_exec_time,
                                    (avg_build_time + avg_exec_time),
                                )
                            )
                            # BUG FIX: the original recorded TOTAL_ELEMENTS / elem
                            # with 'elem' left over from the previous test section;
                            # this run actually uses TOTAL_ELEMENTS outer elements
                            # (matching the run_test call above).
                            store_result(
                                PLOT2,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                TOTAL_ELEMENTS,
                                1,
                                wtx,
                                "BUILD",
                                avg_build_time / TIME_DIV,
                            )
                            store_result(
                                PLOT2,
                                tx_type,
                                oper,
                                fmt,
                                datastore,
                                TOTAL_ELEMENTS,
                                1,
                                wtx,
                                "EXEC",
                                avg_exec_time / TIME_DIV,
                            )

        write_results_to_file(PLOT1, args.outfilestruct, PLOT_FILTER)
        write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)

        end_time = time.time()
        print("End time: %f " % (end_time))
        print("Total execution time: %f" % ((end_time - start_time)))