2 __author__ = "Jan Medved"
3 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
4 __license__ = "New-style BSD"
5 __email__ = "jmedved@cisco.com"
# ---------------------------------------------------------------------------
# Command-line interface for the dsbenchmark driver. Each option below maps
# onto one dimension of the benchmark matrix (transaction type, operation,
# data format, list layout, ops-per-transaction).
# NOTE(review): this view of the file is elided -- the ArgumentParser(...)
# call opened here is closed on lines not shown; do not assume the argument
# list below is complete (an "--outer" style option may also be elided).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Datastore Benchmarking'
                                 'See documentation @:'
                                 'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns'  # noqa
# Target controller endpoint.
parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
# Benchmark matrix dimensions; list-valued options are swept in full by the
# nested loops in the __main__ section.
parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
                    help="list of the transaction types to execute.")
parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
                    help="number of inner elements to process.")
parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
                    help="number of operations per transaction.")
parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
                    default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
                    default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
# Run-count and reporting controls.
parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
parser.add_argument("--runs", type=int, default=10,
                    help="number of official test runs. Note: Reported results are based on these runs.")
parser.add_argument("--plot", type=str, default='none',
                    help="keywords filter for results to be drawn in plot (special keywords: all, none).")
# NOTE(review): 'miliseconds' (sic) is the accepted spelling -- it must stay
# in sync with the 'args.units' comparison in the __main__ section below.
parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
                    help="units of test duration values provided by dsbenchmark controller feature")
args = parser.parse_args()

# Base RESTCONF URL of the target controller, e.g. http://localhost:8181/restconf/
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
def send_clear_request():
    """
    Sends a clear request to the dsbenchmark app. A clear will clear the
    test-exec data store and clear the 'test-executing' flag, allowing a new
    benchmark run to be started.
    :return: None
    """
    url = BASE_URL + "operations/dsbenchmark:cleanup-store"
    r = requests.post(url, stream=False, auth=('admin', 'admin'))
    # Surface the HTTP result so a failed cleanup is visible on the console
    # (the response object was previously assigned but never used).
    print(r.status_code)
def send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Sends a request to the dsbenchmark app to start a data store benchmark test run.
    The dsbenchmark app will perform the requested benchmark test and return measured
    :param tx_type: transaction type; presumably TX-CHAINING or SIMPLE-TX
                    (the --txtype choices) -- this param line is elided from this view
    :param operation: PUT, MERGE, DELETE or READ
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: result dictionary (assumed; the trailing return line is elided from this view)
    """
    url = BASE_URL + "operations/dsbenchmark:start-test"
    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
    # JSON body template for the start-test RPC; %-substituted with the six
    # arguments below.
    # NOTE(review): the interior and closing quotes of this template are
    # elided from this view of the file.
    test_request_template = '''{
        "transaction-type": "%s",
    data = test_request_template % (tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
    # Always record the HTTP status; on success merge in the RPC 'output' payload.
    result = {u'http-status': r.status_code}
    if r.status_code == 200:
        # NOTE(review): dict(a.items() + b.items()) is Python 2 only; under
        # Python 3 .items() returns views that do not support '+'.
        result = dict(result.items() + json.loads(r.content)['output'].items())
        # NOTE(review): an 'else:' line appears to be elided before this print;
        # as visible here the error print would sit in the success branch.
        print 'Error %s, %s' % (r.status_code, r.content)
def print_results(run_type, idx, res):
    """
    Prints results from a dsbenchmark test run to console.
    :param run_type: String parameter that can be used to identify the type of the
                     test run (e.g. WARMUP or TEST)
    :param idx: Index of the test run
    :param res: Parsed json (dictionary) that was returned from a dsbenchmark
                test run; must contain 'status', 'listBuildTime', 'execTime',
                'txOk' and 'txError'
    :return: None
    """
    # print(...) with a single %-formatted argument is valid in both
    # Python 2 and Python 3 (the original print statement was Python 2 only).
    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
    the specified number of dsbenchmark test runs and computes the average time
    for building the test data (a list of lists) and the average time for the
    execution of the test.
    :param warmup_runs: # of warmup runs
    :param test_runs: # of test runs
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: average build time AND average test execution time
    """
    total_build_time = 0.0
    total_exec_time = 0.0

    # print(...) with a single argument is valid in both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print('Tx Type: {0:s}, Operation: {1:s}, Data Format: {2:s}, Outer/Inner Elements: {3:d}/{4:d}, PutsPerTx {5:d}'
          .format(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx))

    # Warmup runs give the JVM a chance to JIT the hot paths; their results
    # are printed but not included in the averages.
    for idx in range(warmup_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('WARMUP', idx, res)

    # Measured runs: accumulate build and execution times, then average.
    for idx in range(test_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('TEST', idx, res)
        total_build_time += res['listBuildTime']
        total_exec_time += res['execTime']

    return total_build_time / test_runs, total_exec_time / test_runs
def store_result(values, tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx, value_name, value):
    """
    Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
    :param values: The list (dictionary) to be used for storing the result
    :param tx_type: transaction type of the measured run
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :param value_name: Value name (name for the measured value)
    :param value: The (measured) value
    :return: None
    """
    # Build a single descriptive key that encodes every benchmark dimension,
    # so the plot filter regexp can select result families by substring.
    record_key = '%s-%s-%s-%s/%sOUTER/INNER-%sOP-%s' % (
        tx_type, data_fmt, operation, outer_elem, inner_elem, ops_per_tx, value_name)
    values[record_key] = value
def write_results_to_file(values, file_name, key_filter):
    """
    Writes specified results (values) into the file (file_name). Results are
    filtered according to the key_filter value. Produces a csv file consumable
    by the Jenkins integration environment.
    :param values: The list (dictionary) to be written into the file
    :param file_name: Name of the (csv) file to be created
    :param key_filter: A regexp string to filter the results to be finally put
                       into the file; special values: 'all' keeps every key,
                       'none' keeps no keys
    :return: None
    """
    # Select the keys first. Using and/or instead of the original bitwise
    # &/| gives proper short-circuiting, so re.search is not evaluated when
    # the filter is 'none' or 'all'.
    keys = [key for key in sorted(values)
            if key_filter != 'none' and (key_filter == 'all' or re.search(key_filter, key) is not None)]
    # 'with' guarantees the file is closed even on error (the original left
    # the handle open), and join avoids the manual trailing-comma trimming.
    with open(file_name, 'wt') as f:
        f.write(', '.join(keys) + '\n')
        f.write(', '.join(str(values[key]) for key in keys) + '\n')
if __name__ == "__main__":
    # Pull the benchmark matrix dimensions from the parsed CLI arguments.
    TX_TYPES = args.txtype
    TOTAL_ELEMENTS = args.total
    INNER_ELEMENTS = args.inner
    OPS_PER_TX = args.ops
    OPERATIONS = args.optype
    DATA_FORMATS = args.format
    PLOT_FILTER = args.plot
    # NOTE(review): the body of this 'if' (presumably setting a TIME_DIV
    # unit-conversion factor used below) is elided from this view, as are the
    # definitions of the PLOT1/PLOT2 dictionaries and the
    # send_clear_request() cleanup call referenced by the comments below.
    if args.units == 'miliseconds':

    # Dictionaries for storing keys & values to plot

    WARMUP_RUNS = args.warmup
    TEST_RUNS = args.runs

    # Clean up any data that may be present in the data store

    # Run the benchmark tests and collect data in a csv file for import into a graphing software
    # NOTE(review): 'f' is never closed in the visible code -- consider a
    # 'with' block or an explicit f.close() after the benchmark loops.
    f = open('test.csv', 'wt')

    start_time = time.time()
    print "Start time: %f " % start_time

    writer = csv.writer(f)

    # Determine the impact of transaction type, data format and data structure on performance.
    # Iterate over all transaction types, data formats, operation types, and different
    # list-of-lists layouts; always use a single operation in each transaction
    print '\n#######################################'
    print 'Tx type, data format & data structure'
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for oper in OPERATIONS:
                print 'Operation: %s' % oper
                writer.writerow(('', '', '%s:' % oper))

                # Sweep inner-list sizes; the outer size is derived so that
                # outer * inner == TOTAL_ELEMENTS, with one op per transaction.
                for elem in INNER_ELEMENTS:
                    avg_build_time, avg_exec_time = \
                        run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, elem, 1)
                    e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
                    writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
                                     (avg_build_time + avg_exec_time)))
                    store_result(PLOT1, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, elem, 1,
                                 'BUILD', avg_build_time / TIME_DIV)
                    store_result(PLOT1, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, elem, 1,
                                 'EXEC', avg_exec_time / TIME_DIV)

    # Determine the impact of number of writes per transaction on performance.
    # Iterate over all transaction types, data formats, operation types, and
    # operations-per-transaction; always use a list of lists where the inner list has one parameter
    print '\n#######################################'
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for oper in OPERATIONS:
                print 'Operation: %s' % oper
                writer.writerow(('', '', '%s:' % oper))

                for wtx in OPS_PER_TX:
                    avg_build_time, avg_exec_time = \
                        run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, TOTAL_ELEMENTS, 1, wtx)
                    writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
                                     (avg_build_time + avg_exec_time)))
                    # NOTE(review): 'elem' here is the stale loop variable left
                    # over from the first benchmark section; the run_test call
                    # above uses TOTAL_ELEMENTS as the outer-element count, so
                    # these two calls probably should too -- confirm before
                    # relying on the PLOT2 keys.
                    store_result(PLOT2, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, 1, wtx,
                                 'BUILD', avg_build_time / TIME_DIV)
                    store_result(PLOT2, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, 1, wtx,
                                 'EXEC', avg_exec_time / TIME_DIV)

    # Dump the two plot dictionaries as csv, filtered by --plot.
    write_results_to_file(PLOT1, 'perf_per_struct.csv', PLOT_FILTER)
    write_results_to_file(PLOT2, 'perf_per_ops.csv', PLOT_FILTER)

    end_time = time.time()
    print "End time: %f " % end_time
    print "Total execution time: %f" % (end_time - start_time)