:return: Result from the test request REST call (json)
"""
url = BASE_URL + "operations/ntfbenchmark:start-test"
- postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
+ postheaders = {"content-type": "application/json", "Accept": "application/json"}
- test_request_template = '''{
+ test_request_template = """{
"input": {
"producer-type": "%s",
"producers": "%s",
"payload-size": "%s",
"iterations": "%s"
}
- }'''
- data = test_request_template % (producer_type, producers, listeners, payload_size, iterations)
- r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
- result = {u'http-status': r.status_code}
+ }"""
+ data = test_request_template % (
+ producer_type,
+ producers,
+ listeners,
+ payload_size,
+ iterations,
+ )
+ r = requests.post(
+ url, data, headers=postheaders, stream=False, auth=("admin", "admin")
+ )
+ result = {u"http-status": r.status_code}
if r.status_code == 200:
- result = dict(result.items() + json.loads(r.content)['output'].items())
+ result = dict(result.items() + json.loads(r.content)["output"].items())
else:
- print('Error %s, %s' % (r.status_code, r.content))
+ print("Error %s, %s" % (r.status_code, r.content))
return result
test run
:return: None
"""
- print('%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d' %
- (run_type, idx,
- res[u'producer-ok'], res[u'producer-error'], res[u'listener-ok'], res[u'producer-rate'],
- res[u'listener-rate'], res[u'producer-elapsed-time'], res[u'listener-elapsed-time']))
-
-
-def run_test(warmup_runs, test_runs, producer_type, producers, listeners, payload_size, iterations):
+ print(
+ "%s #%d: ProdOk: %d, ProdError: %d, LisOk: %d, ProdRate: %d, LisRate %d, ProdTime: %d, ListTime %d"
+ % (
+ run_type,
+ idx,
+ res[u"producer-ok"],
+ res[u"producer-error"],
+ res[u"listener-ok"],
+ res[u"producer-rate"],
+ res[u"listener-rate"],
+ res[u"producer-elapsed-time"],
+ res[u"listener-elapsed-time"],
+ )
+ )
+
+
def run_test(
    warmup_runs,
    test_runs,
    producer_type,
    producers,
    listeners,
    payload_size,
    iterations,
):
    """
    Execute a benchmark test. Performs the JVM warmup before the test, runs
    the specified number of ntfbenchmark test runs and computes the average
    listener elapsed time and producer/listener notification rates.

    :param warmup_runs: Number of un-measured warm-up runs performed first
    :param test_runs: Number of measured runs averaged into the result
    :param producer_type: Producer type passed to the RPC
    :param producers: Number of test producers to start
    :param listeners: Number of test listeners to start
    :param payload_size: Payload size for each notification
    :param iterations: Number of requests each producer issues per run
    :return: Tuple (avg listener elapsed time, avg producer rate,
             avg listener rate) over the measured runs
    """
    total_exec_time = 0.0
    total_prate = 0.0
    total_lrate = 0.0
    for idx in range(warmup_runs):
        res = send_test_request(
            producer_type, producers, listeners, payload_size, iterations
        )
        print_results("WARM-UP", idx, res)
    for idx in range(test_runs):
        res = send_test_request(
            producer_type, producers, listeners, payload_size, iterations
        )
        print_results("TEST", idx, res)
        total_exec_time += res["listener-elapsed-time"]
        total_prate += res["producer-rate"]
        total_lrate += res["listener-rate"]
    # NOTE(review): test_runs == 0 would raise ZeroDivisionError here;
    # callers always pass a positive --run value, so left unguarded.
    return total_exec_time / test_runs, total_prate / test_runs, total_lrate / test_runs
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="RPC Benchmarking")

    # Target host configuration
    parser.add_argument(
        "--host",
        default="localhost",
        help="IP of the target host where benchmarks will be run.",
    )
    parser.add_argument(
        "--port", type=int, default=8181, help="The port number of target host."
    )

    # Benchmark test parameters
    # NOTE(review): nargs="+" makes --ptype a *list* when supplied on the
    # command line, while the default is a plain string; confirm downstream
    # formatting expects that before changing it.
    parser.add_argument(
        "--ptype",
        choices=["DROPPING", "BLOCKING"],
        nargs="+",
        default="BLOCKING",
        help="Producer type. (default: BLOCKING)",
    )
    parser.add_argument(
        "--warm",
        type=int,
        default=10,
        help="The number of warm-up runs before the measured test runs(Default 10)",
    )
    parser.add_argument(
        "--run",
        type=int,
        default=10,
        help="The number of measured test runs. Reported results are based on these average of all measured runs. (Default 10)",
    )
    parser.add_argument(
        "--producers",
        type=int,
        nargs="+",
        default=[1, 2, 4, 8, 16, 32],
        help="The number of test producers to start. (Default 10)",
    )
    parser.add_argument(
        "--listeners",
        type=int,
        nargs="+",
        default=[1, 2, 4, 8, 16, 32],
        help="The number of test listeners to start. (Default 10)",
    )
    parser.add_argument(
        "--iterations",
        type=int,
        default=100,
        help="The number requests that each producer issues during the test run. (Default 10)",
    )
    parser.add_argument(
        "--payload",
        type=int,
        default=10,
        help="Payload size for the RPC - number of elements in a simple integer list. (Default 10)",
    )

    args = parser.parse_args()
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
# Run the benchmark tests and collect data in a csv file for import into a graphing software
- f = open('test.csv', 'wt')
+ f = open("test.csv", "wt")
try:
writer = csv.writer(f)
lrate_matrix = []
prate_matrix = []
for prod in args.producers:
- lrate_row = ['']
- prate_row = ['']
+ lrate_row = [""]
+ prate_row = [""]
for lis in args.listeners:
- exec_time, prate, lrate = run_test(args.warm, args.run, args.ptype, prod, lis,
- args.payload, args.iterations)
- print('Producers: %d, Listeners: %d, prate: %d, lrate: %d' % (prod, lis, prate, lrate))
+ exec_time, prate, lrate = run_test(
+ args.warm,
+ args.run,
+ args.ptype,
+ prod,
+ lis,
+ args.payload,
+ args.iterations,
+ )
+ print(
+ "Producers: %d, Listeners: %d, prate: %d, lrate: %d"
+ % (prod, lis, prate, lrate)
+ )
lrate_row.append(lrate)
prate_row.append(prate)
# writer.writerow((('%s:' % args.ptype), '', '', ''))
# writer.writerow(('', exec_time, prate, lrate))
- writer.writerow(('Listener Rates:', ''))
+ writer.writerow(("Listener Rates:", ""))
writer.writerows(lrate_matrix)
- writer.writerow(('Producer Rates:', ''))
+ writer.writerow(("Producer Rates:", ""))
writer.writerows(prate_matrix)
finally: