11 __author__ = "Jan Medved"
12 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
13 __license__ = "New-style BSD"
14 __email__ = "jmedved@cisco.com"
# Command-line interface of the benchmark driver. Every option has a default,
# so the script can be run without any arguments.
parser = argparse.ArgumentParser(description='Datastore Benchmarking'
                                             ''
                                             'See documentation @:'
                                             'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns')  # noqa
parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
                    help="list of the transaction types to execute.")
parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
parser.add_argument("--inner", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
                    help="number of inner elements to process.")
parser.add_argument("--ops", type=int, default=[1, 10, 100, 1000, 10000, 100000], nargs='+',
                    help="number of operations per transaction.")
parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
                    default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
                    default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
parser.add_argument("--runs", type=int, default=10,
                    help="number of official test runs. Note: Reported results are based on these runs.")
parser.add_argument("--plot", type=str, default='none',
                    help="keywords filter for results to be drawn in plot (special keywords: all, none).")
parser.add_argument("--units", choices=["miliseconds", "microseconds"], default="microseconds",
                    help="units of test duration values provided by dsbenchmark controller feature")
args = parser.parse_args()

# Base URL of the controller's RESTCONF API; all RPC requests are sent here.
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
def send_clear_request():
    """
    Send a clear request to the dsbenchmark app.

    A clear will clear the test-exec data store and clear the
    'test-executing' flag on the controller.
    :return: None (the HTTP status code of the response is printed)
    """
    url = BASE_URL + "operations/dsbenchmark:cleanup-store"

    r = requests.post(url, stream=False, auth=('admin', 'admin'))
    print(r.status_code)
def send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Send a request to the dsbenchmark app to start a data store benchmark test run.

    The dsbenchmark app performs the requested benchmark test and returns the
    measured times in the RPC output.
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE, DELETE or READ
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: Dict with 'http-status' plus, on success, the RPC 'output' fields
    """
    url = BASE_URL + "operations/dsbenchmark:start-test"
    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}

    # JSON payload of the dsbenchmark:start-test RPC
    test_request_template = '''{
        "input": {
            "transaction-type": "%s",
            "operation": "%s",
            "data-format": "%s",
            "outerElements": %d,
            "innerElements": %d,
            "putsPerTx": %d
        }
    }'''
    data = test_request_template % (tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
    result = {u'http-status': r.status_code}
    if r.status_code == 200:
        # Merge the RPC output fields into the result dictionary
        result.update(json.loads(r.content)['output'])
    else:
        print('Error %s, %s' % (r.status_code, r.content))
    return result
def print_results(run_type, idx, res):
    """
    Print results from a dsbenchmark test run to the console.

    :param run_type: String parameter that can be used to identify the type
                     of the test run (e.g. WARMUP or TEST)
    :param idx: Index of the test run
    :param res: Parsed json (dictionary) that was returned from a dsbenchmark
                test run
    :return: None
    """
    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Execute a benchmark test.

    Performs the JVM 'warmup' before the test, runs the specified number of
    dsbenchmark test runs and computes the average time for building the test
    data (a list of lists) and the average time for the execution of the test.
    :param warmup_runs: # of warmup runs (results are printed but discarded)
    :param test_runs: # of test runs (results are averaged)
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: average build time AND average test execution time
    """
    total_build_time = 0.0
    total_exec_time = 0.0

    print('Tx Type: {0:s}, Operation: {1:s}, Data Format: {2:s}, Outer/Inner Elements: {3:d}/{4:d}, PutsPerTx {5:d}'
          .format(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx))
    # Warmup runs: exercised but not counted in the averages
    for idx in range(warmup_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('WARMUP', idx, res)

    # Official test runs: accumulate build & exec times for averaging
    for idx in range(test_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('TEST', idx, res)
        total_build_time += res['listBuildTime']
        total_exec_time += res['execTime']

    return total_build_time / test_runs, total_exec_time / test_runs
def store_result(values, tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx, value_name, value):
    """
    Store a record to the dictionary of values to be written into a csv file
    for plotting purposes.

    :param values: The dictionary to be used for storing the result
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :param value_name: Value name (name for the measured value)
    :param value: The (measured) value
    :return: None (the record is added to 'values' in place)
    """
    # The key encodes all test parameters so each measurement is unique
    plot_key = (tx_type + '-' + data_fmt + '-' + operation + '-' + str(outer_elem) + '/' +
                str(inner_elem) + 'OUTER/INNER-' + str(ops_per_tx) + 'OP-' + value_name)
    values[plot_key] = value
def write_results_to_file(values, file_name, key_filter):
    """
    Write specified results (values) into the file (file_name).

    Results are filtered according to the key_filter value. Produces a csv
    file consumable by the Jenkins integration environment.
    :param values: The dictionary to be written into the file
    :param file_name: Name of the (csv) file to be created
    :param key_filter: A regexp string to filter the results to be finally put
                       into the file (special keywords: 'all' passes every
                       key, 'none' passes no key)
    :return: None
    """
    first_line = ''
    second_line = ''
    # 'with' guarantees the file handle is closed even on error
    with open(file_name, 'wt') as f:
        for key in sorted(values):
            if key_filter != 'none' and (key_filter == 'all' or re.search(key_filter, key) is not None):
                first_line += key + ', '
                second_line += str(values[key]) + ', '
        # Drop the trailing ', ' separator from both lines
        first_line = first_line[:-2]
        second_line = second_line[:-2]
        f.write(first_line + '\n')
        f.write(second_line + '\n')
if __name__ == "__main__":
    # Benchmark parameters taken from the command line
    TX_TYPES = args.txtype
    TOTAL_ELEMENTS = args.total
    INNER_ELEMENTS = args.inner
    OPS_PER_TX = args.ops
    OPERATIONS = args.optype
    DATA_FORMATS = args.format
    PLOT_FILTER = args.plot

    # The controller reports durations in the configured units; plotted
    # values are divided by TIME_DIV to normalize them to milliseconds.
    if args.units == 'miliseconds':
        TIME_DIV = 1
    else:
        TIME_DIV = 1000

    # Dictionaries for storing keys & values to plot
    PLOT1 = {}
    PLOT2 = {}

    WARMUP_RUNS = args.warmup
    TEST_RUNS = args.runs

    # Clean up any data that may be present in the data store
    send_clear_request()

    # Run the benchmark tests and collect data in a csv file for import into
    # a graphing software
    with open('test.csv', 'wt') as f:
        start_time = time.time()
        print("Start time: %f " % start_time)

        writer = csv.writer(f)

        # Determine the impact of transaction type, data format and data
        # structure on performance. Iterate over all transaction types, data
        # formats, operation types, and different list-of-lists layouts;
        # always use a single operation in each transaction.
        print('\n#######################################')
        print('Tx type, data format & data structure')
        print('#######################################')
        for tx_type in TX_TYPES:
            print('***************************************')
            print('Transaction Type: %s' % tx_type)
            print('***************************************')
            writer.writerow((('%s:' % tx_type), '', ''))

            for fmt in DATA_FORMATS:
                print('---------------------------------------')
                print('Data format: %s' % fmt)
                print('---------------------------------------')
                writer.writerow(('', ('%s:' % fmt), ''))

                for oper in OPERATIONS:
                    print('Operation: %s' % oper)
                    writer.writerow(('', '', '%s:' % oper))

                    for elem in INNER_ELEMENTS:
                        # Integer division so outer * inner == TOTAL_ELEMENTS
                        # and the %d formats below receive an int
                        outer_elem = TOTAL_ELEMENTS // elem
                        avg_build_time, avg_exec_time = \
                            run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, outer_elem, elem, 1)
                        e_label = '%d/%d' % (outer_elem, elem)
                        writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
                                         (avg_build_time + avg_exec_time)))
                        store_result(PLOT1, tx_type, oper, fmt, outer_elem, elem, 1,
                                     'BUILD', avg_build_time / TIME_DIV)
                        store_result(PLOT1, tx_type, oper, fmt, outer_elem, elem, 1,
                                     'EXEC', avg_exec_time / TIME_DIV)

        # Determine the impact of number of writes per transaction on
        # performance. Iterate over all transaction types, data formats,
        # operation types, and operations-per-transaction; always use a list
        # of lists where the inner list has one element.
        print('\n#######################################')
        print('Writes per transaction')
        print('#######################################')
        for tx_type in TX_TYPES:
            print('***************************************')
            print('Transaction Type: %s' % tx_type)
            print('***************************************')
            writer.writerow((('%s:' % tx_type), '', ''))

            for fmt in DATA_FORMATS:
                print('---------------------------------------')
                print('Data format: %s' % fmt)
                print('---------------------------------------')
                writer.writerow(('', ('%s:' % fmt), ''))

                for oper in OPERATIONS:
                    print('Operation: %s' % oper)
                    writer.writerow(('', '', '%s:' % oper))

                    for wtx in OPS_PER_TX:
                        avg_build_time, avg_exec_time = \
                            run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, TOTAL_ELEMENTS, 1, wtx)
                        writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
                                         (avg_build_time + avg_exec_time)))
                        # BUG FIX: the original passed TOTAL_ELEMENTS / elem,
                        # where 'elem' was a stale loop variable left over
                        # from the previous section; this run uses
                        # TOTAL_ELEMENTS outer elements with 1 inner element.
                        store_result(PLOT2, tx_type, oper, fmt, TOTAL_ELEMENTS, 1, wtx,
                                     'BUILD', avg_build_time / TIME_DIV)
                        store_result(PLOT2, tx_type, oper, fmt, TOTAL_ELEMENTS, 1, wtx,
                                     'EXEC', avg_exec_time / TIME_DIV)

    write_results_to_file(PLOT1, 'perf_per_struct.csv', PLOT_FILTER)
    write_results_to_file(PLOT2, 'perf_per_ops.csv', PLOT_FILTER)

    end_time = time.time()
    print("End time: %f " % end_time)
    print("Total execution time: %f" % (end_time - start_time))