# Module metadata for the OpenDaylight datastore (dsbenchmark) benchmarking script.
# (The leading numbers on each line are artifacts of a numbered-listing paste.)
11 __author__ = "Jan Medved"
12 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
13 __license__ = "New-style BSD"
14 __email__ = "jmedved@cisco.com"
# Command-line interface of the benchmark driver.
# NOTE(review): this listing is elided -- the ArgumentParser(...) call below is
# missing its continuation/closing lines (orig. lines 18, 21-23), so this
# fragment is not runnable as-is; confirm against the upstream file.
17 parser = argparse.ArgumentParser(description='Datastore Benchmarking'
19 'See documentation @:'
20 'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns' # noqa
# Target controller endpoint (used to build BASE_URL below).
24 parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
25 parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
# Test-matrix dimensions: each list-valued option is iterated over in __main__.
28 parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
29 help="list of the transaction types to execute.")
30 parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
31 parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
32 help="number of inner elements to process.")
33 parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
34 help="number of operations per transaction.")
35 parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
36 default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
37 parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
38 default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
39 parser.add_argument("--datastore", choices=["CONFIG", "OPERATIONAL", "BOTH"], nargs='+',
40 default=["OPERATIONAL", "CONFIG"], help="data-store type (config/operational) to use")
41 # There is also "listeners" parameter specified in the Yang file now.
# Run counts: warmup runs are executed but excluded from the reported averages.
42 parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
43 parser.add_argument("--runs", type=int, default=10,
44 help="number of official test runs. Note: Reported results are based on these runs.")
# Output/plot control. NOTE(review): "miliseconds" is misspelled, but it is part
# of the CLI contract (choices=...); renaming it would break existing invocations.
45 parser.add_argument("--plot", type=str, default='none',
46 help="keywords filter for results to be drawn in plot (special keywords: all, none).")
47 parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
48 help="units of test duration values provided by dsbenchmark controller feature")
# Output file names for the two result sets produced at the end of the run.
# (Fixed: the help strings were copy-pasted from --units and described the wrong option.)
parser.add_argument("--outfile-struct", dest="outfilestruct", default="perf_per_struct.csv",
                    help="name of the csv output file for the 'data structure' benchmark results")
parser.add_argument("--outfile-ops", dest="outfileops", default="perf_per_ops.csv",
                    help="name of the csv output file for the 'operations per transaction' benchmark results")
53 args = parser.parse_args()
# Base URL of the controller's RESTCONF API, built from the --host/--port arguments.
56 BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
# Resets benchmark state on the controller between runs.
59 def send_clear_request():
61     Sends a clear request to the dsbenchmark app. A clear will clear the test-exec data store
62     and clear the 'test-executing' flag.
# (Docstring delimiters and the response-handling lines are elided in this listing.)
65     url = BASE_URL + "operations/dsbenchmark:cleanup-store"
# POST with the controller's default admin/admin basic-auth credentials;
# stream=False so the response body is read eagerly and the connection is released.
67     r = requests.post(url, stream=False, auth=('admin', 'admin'))
# Triggers one benchmark run on the controller and returns its parsed result.
71 def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
73     Sends a request to the dsbenchmark app to start a data store benchmark test run.
74     The dsbenchmark app will perform the requested benchmark test and return measured
76     :param operation: PUT, MERGE, DELETE or READ
77     :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
78     :param datastore: OPERATIONAL, CONFIG or BOTH
79     :param outer_elem: Number of elements in the outer list
80     :param inner_elem: Number of elements in the inner list
81     :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
84     url = BASE_URL + "operations/dsbenchmark:start-test"
85     postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
# JSON body for the start-test RPC; the remaining template fields (orig. lines
# 90-97) are elided in this listing -- presumably one "%s"/"%d" per parameter
# in the order used below. TODO confirm against upstream.
87     test_request_template = '''{
89         "transaction-type": "%s",
98     data = test_request_template % (tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
99     r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
# Start from the HTTP status, then merge in the RPC 'output' fields on success.
100     result = {u'http-status': r.status_code}
# NOTE: dict(a.items() + b.items()) is Python-2-only (items() returns lists there).
101     if r.status_code == 200:
102         result = dict(result.items() + json.loads(r.content)['output'].items())
104         print 'Error %s, %s' % (r.status_code, r.content)
# Console reporter for a single benchmark run's result dictionary.
108 def print_results(run_type, idx, res):
110     Prints results from a dsbenchmark test run to console
111     :param run_type: String parameter that can be used to identify the type of the
112                      test run (e.g. WARMUP or TEST)
113     :param idx: Index of the test run
114     :param res: Parsed json (dictionary) that was returned from a dsbenchmark
# NOTE(review): the printed label is 'testExecTime' but the value read is
# res[u'execTime'] -- the label does not match the key; keys come from the
# RPC 'output' merged in send_test_request.
118     print '%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % \
119         (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError'])
122 def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
124 Execute a benchmark test. Performs the JVM 'wamrup' before the test, runs
125 the specified number of dsbenchmark test runs and computes the average time
126 for building the test data (a list of lists) and the average time for the
127 execution of the test.
128 :param warmup_runs: # of warmup runs
129 :param test_runs: # of test runs
130 :param operation: PUT, MERGE or DELETE
131 :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
132 :param datastore: OPERATIONAL, CONFIG or BOTH
133 :param outer_elem: Number of elements in the outer list
134 :param inner_elem: Number of elements in the inner list
135 :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
136 :return: average build time AND average test execution time
138 total_build_time = 0.0
139 total_exec_time = 0.0
141 print "Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,
142 print "Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx
143 for idx in range(warmup_runs):
144 res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
145 print_results('WARMUP', idx, res)
147 for idx in range(test_runs):
148 res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
149 print_results('TEST', idx, res)
150 total_build_time += res['listBuildTime']
151 total_exec_time += res['execTime']
153 return total_build_time / test_runs, total_exec_time / test_runs
def store_result(values, tx_type, operation, data_fmt, datastore,
                 outer_elem, inner_elem, ops_per_tx, value_name, value):
    """
    Stores a record to the list (dictionary) of values to be written into a csv file for plotting purposes.
    :param values: The dictionary to be used for storing the result
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :param value_name: Value name (name for the measured value)
    :param value: The (measured) value
    :return: None
    """
    # The key encodes every test parameter so each measured value is uniquely
    # identifiable (and filterable by regexp) in the plot csv.
    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' +
                str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
    values[plot_key] = value
def write_results_to_file(values, file_name, key_filter):
    """
    Writes specified results (values) into the file (file_name). Results are filtered according to key_filter value.
    Produces a csv file consumable by the Jenkins integration environment.
    :param values: The dictionary of results to be written into the file
    :param file_name: Name of the (csv) file to be created
    :param key_filter: A regexp string used to filter the results; special values:
                       'all' selects every key, 'none' selects nothing
    :return: None
    """
    # Select the keys to emit. Using logical branches (instead of the original
    # bitwise &/|) short-circuits, so re.search only runs for a real regexp filter.
    if key_filter == 'none':
        selected = []
    elif key_filter == 'all':
        selected = sorted(values)
    else:
        selected = [key for key in sorted(values) if re.search(key_filter, key) is not None]

    # First csv line holds the keys, second line the corresponding values.
    # The with-block guarantees the file is closed/flushed (no close was visible before).
    with open(file_name, 'wt') as out:
        out.write(','.join(selected) + '\n')
        out.write(','.join(str(values[key]) for key in selected) + '\n')
201 if __name__ == "__main__":
# Snapshot the parsed CLI options into module-level names used by the loops below.
203 TX_TYPES = args.txtype
204 TOTAL_ELEMENTS = args.total
205 INNER_ELEMENTS = args.inner
206 OPS_PER_TX = args.ops
207 OPERATIONS = args.optype
208 DATA_FORMATS = args.format
209 DATASTORES = args.datastore
210 PLOT_FILTER = args.plot
# NOTE(review): the branch body setting TIME_DIV (orig. lines 212-215) is elided
# here; presumably TIME_DIV converts the controller's reported durations into the
# requested units -- TODO confirm against upstream.
211 if args.units == 'miliseconds':
216 # Dictionaries for storing keys & values to plot
# (The PLOT1/PLOT2 dictionary initializations, orig. lines 217-220, are elided.)
221 WARMUP_RUNS = args.warmup
222 TEST_RUNS = args.runs
224 # Clean up any data that may be present in the data store
# (The send_clear_request() call, orig. lines 225-226, is elided in this listing.)
227 # Run the benchmark tests and collect data in a csv file for import into a graphing software
# NOTE(review): 'f' is opened here but no f.close() is visible in this listing --
# confirm test.csv is closed (or converted to a with-block) so it is flushed.
228 f = open('test.csv', 'wt')
230 start_time = time.time()
231 print "Start time: %f " % start_time
233 writer = csv.writer(f)
235 # Determine the impact of transaction type, data format and data structure on performance.
236 # Iterate over all transaction types, data formats, operation types, and different
237 # list-of-lists layouts; always use a single operation in each transaction
238 print '\n#######################################'
239 print 'Tx type, data format & data structure'
240 print '#######################################'
241 for tx_type in TX_TYPES:
242 print '***************************************'
243 print 'Transaction Type: %s' % tx_type
244 print '***************************************'
245 writer.writerow((('%s:' % tx_type), '', ''))
247 for fmt in DATA_FORMATS:
248 print '---------------------------------------'
249 print 'Data format: %s' % fmt
250 print '---------------------------------------'
251 writer.writerow(('', ('%s:' % fmt), ''))
253 for datastore in DATASTORES:
255 print 'Data store: %s' % datastore
258 for oper in OPERATIONS:
259 print 'Operation: %s' % oper
260 writer.writerow(('', '', '%s:' % oper))
# Keep the total element count constant: outer list size = TOTAL_ELEMENTS / inner size.
262 for elem in INNER_ELEMENTS:
263 avg_build_time, avg_exec_time = run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt,
264 datastore, TOTAL_ELEMENTS / elem, elem, 1)
265 e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
266 writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
267 (avg_build_time + avg_exec_time)))
268 store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
269 'BUILD', avg_build_time / TIME_DIV)
270 store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
271 'EXEC', avg_exec_time / TIME_DIV)
273 # Determine the impact of number of writes per transaction on performance.
274 # Iterate over all transaction types, data formats, operation types, and
275 # operations-per-transaction; always use a list of lists where the inner list has one parameter
276 print '\n#######################################'
278 print '#######################################'
279 for tx_type in TX_TYPES:
280 print '***************************************'
281 print 'Transaction Type: %s' % tx_type
282 print '***************************************'
283 writer.writerow((('%s:' % tx_type), '', ''))
285 for fmt in DATA_FORMATS:
286 print '---------------------------------------'
287 print 'Data format: %s' % fmt
288 print '---------------------------------------'
289 writer.writerow(('', ('%s:' % fmt), ''))
291 for datastore in DATASTORES:
293 print 'Data store: %s' % datastore
296 for oper in OPERATIONS:
297 print 'Operation: %s' % oper
298 writer.writerow(('', '', '%s:' % oper))
300 for wtx in OPS_PER_TX:
301 avg_build_time, avg_exec_time = \
302 run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx)
303 writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
304 (avg_build_time + avg_exec_time)))
# NOTE(review): BUG -- run_test above was called with outer_elem=TOTAL_ELEMENTS, but
# the two store_result calls below record the key as TOTAL_ELEMENTS / elem, where
# 'elem' is a stale leftover from the first benchmark's inner loop. This should
# almost certainly be TOTAL_ELEMENTS (the outer size actually tested).
305 store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
306 'BUILD', avg_build_time / TIME_DIV)
307 store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, 1, wtx,
308 'EXEC', avg_exec_time / TIME_DIV)
# Dump the accumulated plot dictionaries as csv for the CI plotting job.
310 write_results_to_file(PLOT1, args.outfilestruct, PLOT_FILTER)
311 write_results_to_file(PLOT2, args.outfileops, PLOT_FILTER)
313 end_time = time.time()
314 print "End time: %f " % end_time
315 print "Total execution time: %f" % (end_time - start_time)