__author__ = "Jan Medved"
__copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
__license__ = "New-style BSD"
__email__ = "jmedved@cisco.com"
parser = argparse.ArgumentParser(description='Datastore Benchmarking. '
                                             'See documentation @: '
                                             'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns')  # noqa

24 parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
25 parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
28 parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
29 help="list of the transaction types to execute.")
30 parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
31 parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
32 help="number of inner elements to process.")
33 parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
34 help="number of operations per transaction.")
35 parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
36 default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
37 parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
38 default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
39 parser.add_argument("--datastore", choices=["CONFIG", "OPERATIONAL", "BOTH"], nargs='+',
40 default=["OPERATIONAL", "CONFIG"], help="data-store type (config/operational) to use")
41 # There is also "listeners" parameter specified in the Yang file now.
42 parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
43 parser.add_argument("--runs", type=int, default=10,
44 help="number of official test runs. Note: Reported results are based on these runs.")
45 parser.add_argument("--plot", type=str, default='none',
46 help="keywords filter for results to be drawn in plot (special keywords: all, none).")
47 parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
48 help="units of test duration values provided by dsbenchmark controller feature")
49 args = parser.parse_args()
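# Base RESTCONF URL on the controller; the dsbenchmark RPCs below are invoked relative to it.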
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)

def send_clear_request():
    """
    Sends a clear request to the dsbenchmark app. A clear will clear the test-exec data store
    and clear the 'test-executing' flag.
    :return: None
    """
    url = BASE_URL + "operations/dsbenchmark:cleanup-store"

    r = requests.post(url, stream=False, auth=('admin', 'admin'))
    print r.status_code

def send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
    """
    Sends a request to the dsbenchmark app to start a data store benchmark test run.
    The dsbenchmark app will perform the requested benchmark test and return the
    measured test execution times.
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE, DELETE or READ
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) in each transaction
    :return: Dictionary with the HTTP status code and the parsed test results
    """
    url = BASE_URL + "operations/dsbenchmark:start-test"
    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}
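    # Request body for the dsbenchmark 'start-test' RPC. The leaf names below are assumed to
    # match the start-test input definition in the dsbenchmark Yang model.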
    test_request_template = '''{
        "input": {
            "transaction-type": "%s",
            "operation": "%s",
            "data-format": "%s",
            "data-store": "%s",
            "outerElements": %d,
            "innerElements": %d,
            "putsPerTx": %d
        }
    }'''
    data = test_request_template % (tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
    result = {u'http-status': r.status_code}
    if r.status_code == 200:
        result = dict(result.items() + json.loads(r.content)['output'].items())
    else:
        print 'Error %s, %s' % (r.status_code, r.content)
    return result

def print_results(run_type, idx, res):
    """
    Prints the results of a dsbenchmark test run to the console.
    :param run_type: String parameter that can be used to identify the type of the
                     test run (e.g. WARMUP or TEST)
    :param idx: Index of the test run
    :param res: Parsed JSON (dictionary) that was returned from a dsbenchmark
                test run
    :return: None
    """
    print '%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' % \
          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError'])

def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx):
    """
    Executes a benchmark test. Performs the JVM 'warmup' runs before the test, runs
    the specified number of dsbenchmark test runs and computes the average time
    for building the test data (a list of lists) and the average time for the
    execution of the test.
    :param warmup_runs: # of warmup runs
    :param test_runs: # of test runs
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) in each transaction
    :return: average build time AND average test execution time
    """
    total_build_time = 0.0
    total_exec_time = 0.0

    print "Tx Type:", tx_type, "Operation:", operation, "Data Format:", data_fmt, "Datastore:", datastore,
    print "Outer Elements:", outer_elem, "Inner Elements:", inner_elem, "PutsPerTx:", ops_per_tx
    for idx in range(warmup_runs):
        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
        print_results('WARMUP', idx, res)
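
    # Only the official test runs below contribute to the averaged build and execution times.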
    for idx in range(test_runs):
        res = send_test_request(tx_type, operation, data_fmt, datastore, outer_elem, inner_elem, ops_per_tx)
        print_results('TEST', idx, res)
        total_build_time += res['listBuildTime']
        total_exec_time += res['execTime']

    return total_build_time / test_runs, total_exec_time / test_runs

def store_result(values, tx_type, operation, data_fmt, datastore,
                 outer_elem, inner_elem, ops_per_tx, value_name, value):
    """
    Stores a record in the list (dictionary) of values to be written into a csv file for plotting purposes.
    :param values: The list (dictionary) to be used for storing the result
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param datastore: OPERATIONAL, CONFIG or BOTH
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) in each transaction
    :param value_name: Value name (name for the measured value)
    :param value: The (measured) value
    :return: None
    """
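    # Keys encode the full test configuration, e.g. (illustrative):
    # 'CONFIG-BINDING-AWARE-SIMPLE-TX-PUT-1000/100OUTER/INNER-1OP-EXEC'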
    plot_key = (datastore + '-' + data_fmt + '-' + tx_type + '-' + operation + '-' + str(outer_elem) + '/' +
                str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
    values[plot_key] = value

def write_results_to_file(values, file_name, key_filter):
    """
    Writes the specified results (values) into the file (file_name). Results are filtered according to the
    key_filter value. Produces a csv file consumable by the Jenkins integration environment.
    :param values: The list (dictionary) to be written into the file
    :param file_name: Name of the (csv) file to be created
    :param key_filter: A regexp string to filter the results that are finally written into the file
    :return: None
    """
    first_line = ''
    second_line = ''
    f = open(file_name, 'wt')

    for key in sorted(values):
        if key_filter != 'none' and (key_filter == 'all' or re.search(key_filter, key) is not None):
            first_line += key + ','
            second_line += str(values[key]) + ','
    first_line = first_line[:-1]
    second_line = second_line[:-1]
    f.write(first_line + '\n')
    f.write(second_line + '\n')
    f.close()

if __name__ == "__main__":
    TX_TYPES = args.txtype
    TOTAL_ELEMENTS = args.total
    INNER_ELEMENTS = args.inner
    OPS_PER_TX = args.ops
    OPERATIONS = args.optype
    DATA_FORMATS = args.format
    DATASTORES = args.datastore
    PLOT_FILTER = args.plot

    # Divisor for the duration values reported by the controller, so that stored plot
    # values end up in milliseconds regardless of the configured units.
    if args.units == 'miliseconds':
        TIME_DIV = 1
    else:
        TIME_DIV = 1000

    # Dictionaries for storing keys & values to plot
    PLOT1 = {}
    PLOT2 = {}

    WARMUP_RUNS = args.warmup
    TEST_RUNS = args.runs
    # Clean up any data that may be present in the data store
    send_clear_request()

    # Run the benchmark tests and collect data in a csv file for import into graphing software
    f = open('test.csv', 'wt')
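    # test.csv receives every averaged result row as it is produced; the filtered plot
    # values are written to perf_per_struct.csv and perf_per_ops.csv at the end.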
    start_time = time.time()
    print "Start time: %f " % start_time

    writer = csv.writer(f)
    # Determine the impact of transaction type, data format and data structure on performance.
    # Iterate over all transaction types, data formats, operation types, and different
    # list-of-lists layouts; always use a single operation in each transaction.
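    # For example, with the default --total of 100000 and an inner list size of 100, each run
    # builds 1000 outer elements (100000 / 100) and commits one operation per transaction.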
    print '\n#######################################'
    print 'Tx type, data format & data structure'
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for datastore in DATASTORES:
                print 'Data store: %s' % datastore

                for oper in OPERATIONS:
                    print 'Operation: %s' % oper
                    writer.writerow(('', '', '%s:' % oper))

                    for elem in INNER_ELEMENTS:
                        avg_build_time, avg_exec_time = run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt,
                                                                 datastore, TOTAL_ELEMENTS / elem, elem, 1)
                        e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
                        writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
                                         (avg_build_time + avg_exec_time)))
                        store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
                                     'BUILD', avg_build_time / TIME_DIV)
                        store_result(PLOT1, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS / elem, elem, 1,
                                     'EXEC', avg_exec_time / TIME_DIV)

    # Determine the impact of the number of writes per transaction on performance.
    # Iterate over all transaction types, data formats, operation types, and
    # operations-per-transaction values; always use a list of lists where the inner list has one element.
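    # For example, with the default --total of 100000 and 1000 operations per transaction, each
    # run processes 100000 outer elements (inner list size 1) in batches of 1000 operations.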
    print '\n#######################################'
    print 'Writes per transaction'
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for datastore in DATASTORES:
                print 'Data store: %s' % datastore

                for oper in OPERATIONS:
                    print 'Operation: %s' % oper
                    writer.writerow(('', '', '%s:' % oper))

                    for wtx in OPS_PER_TX:
                        avg_build_time, avg_exec_time = \
                            run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx)
                        writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
                                         (avg_build_time + avg_exec_time)))
                        store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx,
                                     'BUILD', avg_build_time / TIME_DIV)
                        store_result(PLOT2, tx_type, oper, fmt, datastore, TOTAL_ELEMENTS, 1, wtx,
                                     'EXEC', avg_exec_time / TIME_DIV)

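    # Dump the collected plot values: per-structure results from the first sweep and
    # per-operations-per-transaction results from the second sweep.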
    write_results_to_file(PLOT1, 'perf_per_struct.csv', PLOT_FILTER)
    write_results_to_file(PLOT2, 'perf_per_ops.csv', PLOT_FILTER)
    f.close()

    end_time = time.time()
    print "End time: %f " % end_time
    print "Total execution time: %f" % (end_time - start_time)