Bump pre-commit black to 22.1.0
[integration/test.git] / tools / mdsal_benchmark / dsbenchmark.py
index 5e7b553c0c8a44f6d0c9d55b9c7ed202e5dfb10d..4dbc004d34cb7bab12e281e96a4e5ee3b872f989 100755 (executable)
@@ -14,42 +14,107 @@ __license__ = "New-style BSD"
 __email__ = "jmedved@cisco.com"
 
 
-parser = argparse.ArgumentParser(description='Datastore Benchmarking'
-                                             ''
-                                             'See documentation @:'
-                                             'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns'  # noqa
-                                             '')
+parser = argparse.ArgumentParser(
+    description="Datastore Benchmarking"
+    ""
+    "See documentation @:"
+    "https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns"  # noqa
+    ""
+)
 
 # Host Config
-parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
-parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
+parser.add_argument(
+    "--host",
+    default="localhost",
+    help="the IP of the target host to initiate benchmark testing on.",
+)
+parser.add_argument(
+    "--port", type=int, default=8181, help="the port number of target host."
+)
 
 # Test Parameters
-parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
-                    help="list of the transaction types to execute.")
-parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
-parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
-                    help="number of inner elements to process.")
-parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
-                    help="number of operations per transaction.")
-parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
-                    default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
-parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
-                    default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
-parser.add_argument("--datastore", choices=["CONFIG", "OPERATIONAL", "BOTH"], nargs='+',
-                    default=["OPERATIONAL", "CONFIG"], help="data-store type (config/operational) to use")
+parser.add_argument(
+    "--txtype",
+    choices=["TX-CHAINING", "SIMPLE-TX"],
+    nargs="+",
+    default=["TX-CHAINING", "SIMPLE-TX"],
+    help="list of the transaction types to execute.",
+)
+parser.add_argument(
+    "--total", type=int, default=100000, help="total number of elements to process."
+)
+parser.add_argument(
+    "--inner",
+    type=int,
+    default=[1, 10, 100, 1000, 10000, 100000],
+    nargs="+",
+    help="number of inner elements to process.",
+)
+parser.add_argument(
+    "--ops",
+    type=int,
+    default=[1, 10, 100, 1000, 10000, 100000],
+    nargs="+",
+    help="number of operations per transaction.",
+)
+parser.add_argument(
+    "--optype",
+    choices=["PUT", "MERGE", "DELETE", "READ"],
+    nargs="+",
+    default=["PUT", "MERGE", "DELETE", "READ"],
+    help="list of the operation types to execute.",
+)
+parser.add_argument(
+    "--format",
+    choices=["BINDING-AWARE", "BINDING-INDEPENDENT"],
+    nargs="+",
+    default=["BINDING-AWARE", "BINDING-INDEPENDENT"],
+    help="list of data formats to execute.",
+)
+parser.add_argument(
+    "--datastore",
+    choices=["CONFIG", "OPERATIONAL", "BOTH"],
+    nargs="+",
+    default=["OPERATIONAL", "CONFIG"],
+    help="data-store type (config/operational) to use",
+)
 # There is also "listeners" parameter specified in the Yang file now.
-parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
-parser.add_argument("--runs", type=int, default=10,
-                    help="number of official test runs. Note: Reported results are based on these runs.")
-parser.add_argument("--plot", type=str, default='none',
-                    help="keywords filter for results to be drawn in plot (special keywords: all, none).")
-parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
-                    help="units of test duration values provided by dsbenchmark controller feature")
-parser.add_argument("--outfile-struct", dest="outfilestruct", default="perf_per_struct.csv",
-                    help="units of test duration values provided by dsbenchmark controller feature")
-parser.add_argument("--outfile-ops", dest="outfileops", default="perf_per_ops.csv",
-                    help="units of test duration values provided by dsbenchmark controller feature")
+parser.add_argument(
+    "--warmup",
+    type=int,
+    default=10,
+    help="number of warmup runs before official test runs",
+)
+parser.add_argument(
+    "--runs",
+    type=int,
+    default=10,
+    help="number of official test runs. Note: Reported results are based on these runs.",
+)
+parser.add_argument(
+    "--plot",
+    type=str,
+    default="none",
+    help="keywords filter for results to be drawn in plot (special keywords: all, none).",
+)
+parser.add_argument(
+    "--units",
+    choices=["miliseconds", "microseconds"],
+    default="microseconds",
+    help="units of test duration values provided by dsbenchmark controller feature",
+)
+parser.add_argument(
+    "--outfile-struct",
+    dest="outfilestruct",
+    default="perf_per_struct.csv",
+    help="name of the output csv file for per-structure benchmark results",
+)
+parser.add_argument(
+    "--outfile-ops",
+    dest="outfileops",
+    default="perf_per_ops.csv",
+    help="name of the output csv file for per-operation benchmark results",
+)
 args = parser.parse_args()
 
 
@@ -64,11 +129,13 @@ def send_clear_request():
     """
     url = BASE_URL + "operations/dsbenchmark:cleanup-store"
 
-    r = requests.post(url, stream=False, auth=('admin', 'admin'))
+    r = requests.post(url, stream=False, auth=("admin", "admin"))
     print(r.status_code)
 
 
-def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
+def send_test_request(
+    tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+):
     """
     Sends a request to the dsbenchmark app to start a data store benchmark test run.
     The dsbenchmark app will perform the requested benchmark test and return measured
@@ -82,9 +149,9 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner
     :return:
     """
     url = BASE_URL + "operations/dsbenchmark:start-test"
-    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
+    postheaders = {"content-type": "application/json", "Accept": "application/json"}
 
-    test_request_template = '''{
+    test_request_template = """{
         "input": {
             "transaction-type": "%s",
             "operation": "%s",
@@ -94,14 +161,24 @@ def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner
             "innerElements": %d,
             "putsPerTx": %d
         }
-    }'''
-    data = test_request_template % (tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
-    result = {u'http-status': r.status_code}
+    }"""
+    data = test_request_template % (
+        tx_type,
+        operation,
+        data_fmt,
+        datastore,
+        outer_elem,
+        inner_elem,
+        ops_per_tx,
+    )
+    r = requests.post(
+        url, data, headers=postheaders, stream=False, auth=("admin", "admin")
+    )
+    result = {"http-status": r.status_code}
     if r.status_code == 200:
-        result = dict(result.items() + json.loads(r.content)['output'].items())
+        result = dict(result.items() + json.loads(r.content)["output"].items())
     else:
-        print('Error %s, %s' % (r.status_code, r.content))
+        print("Error %s, %s" % (r.status_code, r.content))
     return result
 
 
@@ -115,11 +192,31 @@ def print_results(run_type, idx, res):
                 test run
     :return: None
     """
-    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
-          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
-
-
-def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
+    print(
+        "%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d"
+        % (
+            run_type,
+            idx,
+            res["status"],
+            res["listBuildTime"],
+            res["execTime"],
+            res["txOk"],
+            res["txError"],
+        )
+    )
+
+
+def run_test(
+    warmup_runs,
+    test_runs,
+    tx_type,
+    operation,
+    data_fmt,
+    datastore,
+    outer_elem,
+    inner_elem,
+    ops_per_tx,
+):
     """
     Execute a benchmark test. Performs the JVM 'wamrup' before the test, runs
     the specified number of dsbenchmark test runs and computes the average time
@@ -138,23 +235,53 @@ def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, ou
     total_build_time = 0.0
     total_exec_time = 0.0
 
-    print("Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,)
-    print("Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx)
+    print(
+        "Tx Type:",
+        tx_type,
+        "Operation:",
+        operation,
+        "Data Format:",
+        data_fmt,
+        "Datastore:",
+        datastore,
+    )
+    print(
+        "Outer Elements:",
+        outer_elem,
+        "Inner Elements:",
+        inner_elem,
+        "PutsPerTx:",
+        ops_per_tx,
+    )
     for idx in range(warmup_runs):
-        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-        print_results('WARMUP', idx, res)
+        res = send_test_request(
+            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+        )
+        print_results("WARMUP", idx, res)
 
     for idx in range(test_runs):
-        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
-        print_results('TEST', idx, res)
-        total_build_time += res['listBuildTime']
-        total_exec_time += res['execTime']
+        res = send_test_request(
+            tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx
+        )
+        print_results("TEST", idx, res)
+        total_build_time += res["listBuildTime"]
+        total_exec_time += res["execTime"]
 
     return total_build_time / test_runs, total_exec_time / test_runs
 
 
-def store_result(values, tx_type, operation, data_fmt, datastore,
-                 outer_elem, inner_elem, ops_per_tx, value_name, value):
+def store_result(
+    values,
+    tx_type,
+    operation,
+    data_fmt,
+    datastore,
+    outer_elem,
+    inner_elem,
+    ops_per_tx,
+    value_name,
+    value,
+):
     """
     Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
     :param values: The list (dictionary) to be used for storing the result
@@ -168,8 +295,23 @@ def store_result(values, tx_type, operation, data_fmt, datastore,
     :param value: The (measured) value
     :return: none
     """
-    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/'
-                + str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
+    plot_key = (
+        datastore
+        + "-"
+        + data_fmt
+        + "-"
+        + tx_type
+        + "-"
+        + operation
+        + "-"
+        + str(outer_elem)
+        + "/"
+        + str(inner_elem)
+        + "OUTER/INNER-"
+        + str(ops_per_tx)
+        + "OP-"
+        + value_name
+    )
     values[plot_key] = value
 
 
@@ -182,18 +324,20 @@ def write_results_to_file(values, file_name, key_filter):
     :param key_filter: A regexp string to filter the results to be finally put into the file
     :return: none
     """
-    first_line = ''
-    second_line = ''
-    f = open(file_name, 'wt')
+    first_line = ""
+    second_line = ""
+    f = open(file_name, "wt")
     try:
         for key in sorted(values):
-            if (key_filter != 'none') & ((key_filter == 'all') | (re.search(key_filter, key) is not None)):
-                first_line += key + ','
-                second_line += str(values[key]) + ','
+            if (key_filter != "none") & (
+                (key_filter == "all") | (re.search(key_filter, key) is not None)
+            ):
+                first_line += key + ","
+                second_line += str(values[key]) + ","
         first_line = first_line[:-1]
         second_line = second_line[:-1]
-        f.write(first_line + '\n')
-        f.write(second_line + '\n')
+        f.write(first_line + "\n")
+        f.write(second_line + "\n")
     finally:
         f.close()
 
@@ -208,7 +352,7 @@ if __name__ == "__main__":
     DATA_FORMATS = args.format
     DATASTORES = args.datastore
     PLOT_FILTER = args.plot
-    if args.units == 'miliseconds':
+    if args.units == "miliseconds":
         TIME_DIV = 1
     else:
         TIME_DIV = 1000
@@ -225,7 +369,7 @@ if __name__ == "__main__":
     send_clear_request()
 
     # Run the benchmark tests and collect data in a csv file for import into a graphing software
-    f = open('test.csv', 'wt')
+    f = open("test.csv", "wt")
     try:
         start_time = time.time()
         print("Start time: %f " % (start_time))
@@ -235,77 +379,153 @@ if __name__ == "__main__":
         # Determine the impact of transaction type, data format and data structure on performance.
         # Iterate over all transaction types, data formats, operation types, and different
         # list-of-lists layouts; always use a single operation in each transaction
-        print('\n#######################################')
-        print('Tx type, data format & data structure')
-        print('#######################################')
+        print("\n#######################################")
+        print("Tx type, data format & data structure")
+        print("#######################################")
         for tx_type in TX_TYPES:
-            print('***************************************')
-            print('Transaction Type: %s' % tx_type)
-            print('***************************************')
-            writer.writerow((('%s:' % tx_type), '', ''))
+            print("***************************************")
+            print("Transaction Type: %s" % tx_type)
+            print("***************************************")
+            writer.writerow((("%s:" % tx_type), "", ""))
 
             for fmt in DATA_FORMATS:
-                print('---------------------------------------')
-                print('Data format: %s' % fmt)
-                print('---------------------------------------')
-                writer.writerow(('', ('%s:' % fmt), ''))
+                print("---------------------------------------")
+                print("Data format: %s" % fmt)
+                print("---------------------------------------")
+                writer.writerow(("", ("%s:" % fmt), ""))
 
                 for datastore in DATASTORES:
                     print
-                    print('Data store: %s' % datastore)
+                    print("Data store: %s" % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print('Operation: %s' % oper)
-                        writer.writerow(('', '', '%s:' % oper))
+                        print("Operation: %s" % oper)
+                        writer.writerow(("", "", "%s:" % oper))
 
                         for elem in INNER_ELEMENTS:
-                            avg_build_time, avg_exec_time = run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt,
-                                                                     datastore, TOTAL_ELEMENTS / elem, elem, 1)
-                            e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
-                            writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
-                                             (avg_build_time + avg_exec_time)))
-                            store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
-                                         'BUILD', avg_build_time / TIME_DIV)
-                            store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
-                                         'EXEC', avg_exec_time / TIME_DIV)
+                            avg_build_time, avg_exec_time = run_test(
+                                WARMUP_RUNS,
+                                TEST_RUNS,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                            )
+                            e_label = "%d/%d" % (TOTAL_ELEMENTS / elem, elem)
+                            writer.writerow(
+                                (
+                                    "",
+                                    "",
+                                    "",
+                                    e_label,
+                                    avg_build_time,
+                                    avg_exec_time,
+                                    (avg_build_time + avg_exec_time),
+                                )
+                            )
+                            store_result(
+                                PLOT1,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                                "BUILD",
+                                avg_build_time / TIME_DIV,
+                            )
+                            store_result(
+                                PLOT1,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                elem,
+                                1,
+                                "EXEC",
+                                avg_exec_time / TIME_DIV,
+                            )
 
         # Determine the impact of number of writes per transaction on performance.
         # Iterate over all transaction types, data formats, operation types, and
         # operations-per-transaction; always use a list of lists where the inner list has one parameter
-        print('\n#######################################')
-        print('Puts per tx')
-        print('#######################################')
+        print("\n#######################################")
+        print("Puts per tx")
+        print("#######################################")
         for tx_type in TX_TYPES:
-            print('***************************************')
-            print('Transaction Type: %s' % tx_type)
-            print('***************************************')
-            writer.writerow((('%s:' % tx_type), '', ''))
+            print("***************************************")
+            print("Transaction Type: %s" % tx_type)
+            print("***************************************")
+            writer.writerow((("%s:" % tx_type), "", ""))
 
             for fmt in DATA_FORMATS:
-                print('---------------------------------------')
-                print('Data format: %s' % fmt)
-                print('---------------------------------------')
-                writer.writerow(('', ('%s:' % fmt), ''))
+                print("---------------------------------------")
+                print("Data format: %s" % fmt)
+                print("---------------------------------------")
+                writer.writerow(("", ("%s:" % fmt), ""))
 
                 for datastore in DATASTORES:
                     print
-                    print('Data store: %s' % datastore)
+                    print("Data store: %s" % datastore)
                     print
 
                     for oper in OPERATIONS:
-                        print('Operation: %s' % oper)
-                        writer.writerow(('', '', '%s:' % oper))
+                        print("Operation: %s" % oper)
+                        writer.writerow(("", "", "%s:" % oper))
 
                         for wtx in OPS_PER_TX:
-                            avg_build_time, avg_exec_time = \
-                                run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx)
-                            writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
-                                             (avg_build_time + avg_exec_time)))
-                            store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
-                                         'BUILD', avg_build_time / TIME_DIV)
-                            store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
-                                         'EXEC', avg_exec_time / TIME_DIV)
+                            avg_build_time, avg_exec_time = run_test(
+                                WARMUP_RUNS,
+                                TEST_RUNS,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS,
+                                1,
+                                wtx,
+                            )
+                            writer.writerow(
+                                (
+                                    "",
+                                    "",
+                                    "",
+                                    wtx,
+                                    avg_build_time,
+                                    avg_exec_time,
+                                    (avg_build_time + avg_exec_time),
+                                )
+                            )
+                            store_result(
+                                PLOT2,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                1,
+                                wtx,
+                                "BUILD",
+                                avg_build_time / TIME_DIV,
+                            )
+                            store_result(
+                                PLOT2,
+                                tx_type,
+                                oper,
+                                fmt,
+                                datastore,
+                                TOTAL_ELEMENTS / elem,
+                                1,
+                                wtx,
+                                "EXEC",
+                                avg_exec_time / TIME_DIV,
+                            )
 
         write_results_to_file(PLOT1, args.outfilestruct, PLOT_FILTER)
         write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)