2 __author__ = "Jan Medved"
3 __copyright__ = "Copyright(c) 2015, Cisco Systems, Inc."
4 __license__ = "New-style BSD"
5 __email__ = "jmedved@cisco.com"
13 parser = argparse.ArgumentParser(description='Datastore Benchmarking'
15 'See documentation @:'
16 'https://wiki.opendaylight.org/view/Controller_Core_Functionality_Tutorials:Tutorials:Data_Store_Benchmarking_and_Data_Access_Patterns' # noqa
# ---------------------------------------------------------------------------
# Benchmark CLI options
# ---------------------------------------------------------------------------
parser.add_argument("--host", default="localhost", help="the IP of the target host to initiate benchmark testing on.")
parser.add_argument("--port", type=int, default=8181, help="the port number of target host.")
parser.add_argument("--txtype", choices=["TX-CHAINING", "SIMPLE-TX"], nargs='+', default=["TX-CHAINING", "SIMPLE-TX"],
                    help="list of the transaction types to execute.")
parser.add_argument("--total", type=int, default=100000, help="total number of elements to process.")
# BUG FIX: '--inner' and '--ops' are iterated over by the main loops, so a
# user-supplied value must always parse to a list.  Without nargs='+',
# 'type=int' yields a bare int and 'for elem in INNER_ELEMENTS' (or
# 'for wtx in OPS_PER_TX') raises TypeError.
parser.add_argument("--inner", type=int, nargs='+', default=[1, 10, 100, 1000, 10000, 100000],
                    help="number of inner elements to process.")
parser.add_argument("--ops", type=int, nargs='+', default=[1, 10, 100, 1000, 10000, 100000],
                    help="number of operations per transaction.")
parser.add_argument("--optype", choices=["PUT", "MERGE", "DELETE", "READ"], nargs='+',
                    default=["PUT", "MERGE", "DELETE", "READ"], help="list of the types operations to execute.")
parser.add_argument("--format", choices=["BINDING-AWARE", "BINDING-INDEPENDENT"], nargs='+',
                    default=["BINDING-AWARE", "BINDING-INDEPENDENT"], help="list of data formats to execute.")
parser.add_argument("--warmup", type=int, default=10, help="number of warmup runs before official test runs")
parser.add_argument("--runs", type=int, default=10,
                    help="number of official test runs. Note: Reported results are based on these runs.")
args = parser.parse_args()

# Base URL for RESTCONF requests to the target controller
BASE_URL = "http://%s:%d/restconf/" % (args.host, args.port)
def send_clear_request():
    """
    Sends a clear request to the dsbenchmark app. A clear will clear the test-exec data store
    and clear the 'test-executing' flag.
    :return: None
    """
    url = BASE_URL + "operations/dsbenchmark:cleanup-store"

    r = requests.post(url, stream=False, auth=('admin', 'admin'))
    # Surface failures instead of silently discarding the response
    # (matches the error-reporting style of send_test_request()).
    if r.status_code != 200:
        print('Error %s, %s' % (r.status_code, r.content))
def send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Sends a request to the dsbenchmark app to start a data store benchmark test run.
    The dsbenchmark app will perform the requested benchmark test and return measured
    execution times.
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE, DELETE or READ
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: dict with u'http-status' plus, on success, the RPC 'output' fields
    """
    url = BASE_URL + "operations/dsbenchmark:start-test"
    postheaders = {'content-type': 'application/json', 'Accept': 'application/json'}

    # JSON body for the dsbenchmark:start-test RPC.
    # NOTE(review): field names below are reconstructed to match the dsbenchmark
    # YANG model -- confirm against the app's RPC definition.
    test_request_template = '''{
        "input": {
            "transaction-type": "%s",
            "operation": "%s",
            "data-format": "%s",
            "outerElements": %d,
            "innerElements": %d,
            "putsPerTx": %d
        }
    }'''
    data = test_request_template % (tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
    r = requests.post(url, data, headers=postheaders, stream=False, auth=('admin', 'admin'))
    result = {u'http-status': r.status_code}
    if r.status_code == 200:
        # Merge the RPC output into the result. dict.update() works on both
        # Python 2 and 3 (the 'items() + items()' merge was Python-2-only).
        result.update(json.loads(r.content)['output'])
    else:
        print('Error %s, %s' % (r.status_code, r.content))
    return result
def print_results(run_type, idx, res):
    """
    Prints results from a dsbenchmark test run to console.
    :param run_type: String parameter that can be used to identify the type of the
                     test run (e.g. WARMUP or TEST)
    :param idx: Index of the test run
    :param res: Parsed json (dictionary) that was returned from a dsbenchmark
                test run
    :return: None
    """
    # Single-argument parenthesized print is valid on both Python 2 and 3.
    print('%s #%d: status: %s, listBuildTime %d, testExecTime %d, txOk %d, txError %d' %
          (run_type, idx, res[u'status'], res[u'listBuildTime'], res[u'execTime'], res[u'txOk'], res[u'txError']))
def run_test(warmup_runs, test_runs, tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx):
    """
    Execute a benchmark test. Performs the JVM 'warmup' before the test, runs
    the specified number of dsbenchmark test runs and computes the average time
    for building the test data (a list of lists) and the average time for the
    execution of the test.
    :param warmup_runs: # of warmup runs
    :param test_runs: # of test runs (must be > 0, used as averaging divisor)
    :param tx_type: TX-CHAINING or SIMPLE-TX
    :param operation: PUT, MERGE or DELETE
    :param data_fmt: BINDING-AWARE or BINDING-INDEPENDENT
    :param outer_elem: Number of elements in the outer list
    :param inner_elem: Number of elements in the inner list
    :param ops_per_tx: Number of operations (PUTs, MERGEs or DELETEs) on each transaction
    :return: average build time AND average test execution time
    """
    total_build_time = 0.0
    total_exec_time = 0.0

    print('Tx Type: {0:s}, Operation: {1:s}, Data Format: {2:s}, Outer/Inner Elements: {3:d}/{4:d}, PutsPerTx {5:d}'
          .format(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx))
    # Warmup runs: executed, printed, but excluded from the averages.
    for idx in range(warmup_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('WARMUP', idx, res)

    # Official runs: accumulate build & execution times for averaging.
    for idx in range(test_runs):
        res = send_test_request(tx_type, operation, data_fmt, outer_elem, inner_elem, ops_per_tx)
        print_results('TEST', idx, res)
        total_build_time += res['listBuildTime']
        total_exec_time += res['execTime']

    # Accumulators start at 0.0, so this is float division on Python 2 too.
    return total_build_time / test_runs, total_exec_time / test_runs
if __name__ == "__main__":
    # Copy CLI arguments into module-level "constants" driving the loops below.
    TX_TYPES = args.txtype
    TOTAL_ELEMENTS = args.total
    INNER_ELEMENTS = args.inner
    OPS_PER_TX = args.ops
    OPERATIONS = args.optype
    DATA_FORMATS = args.format

    WARMUP_RUNS = args.warmup
    TEST_RUNS = args.runs

    # Clean up any data that may be present in the data store
    # NOTE(review): no call to send_clear_request() is visible at this point in
    # this copy of the file -- confirm the cleanup RPC is actually invoked.

    # Run the benchmark tests and collect data in a csv file for import into a graphing software
    # NOTE(review): 'f' does not appear to be closed anywhere below -- confirm.
    f = open('test.csv', 'wt')

    start_time = time.time()
    print "Start time: %f " % start_time

    writer = csv.writer(f)

    # Determine the impact of transaction type, data format and data structure on performance.
    # Iterate over all transaction types, data formats, operation types, and different
    # list-of-lists layouts; always use a single operation in each transaction
    print '\n#######################################'
    print 'Tx type, data format & data structure'
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for oper in OPERATIONS:
                print 'Operation: %s' % oper
                writer.writerow(('', '', '%s:' % oper))

                # outer_elem * inner_elem == TOTAL_ELEMENTS for every layout;
                # exactly one operation per transaction in this pass.
                for elem in INNER_ELEMENTS:
                    avg_build_time, avg_exec_time = \
                        run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, TOTAL_ELEMENTS / elem, elem, 1)
                    e_label = '%d/%d' % (TOTAL_ELEMENTS / elem, elem)
                    writer.writerow(('', '', '', e_label, avg_build_time, avg_exec_time,
                                     (avg_build_time + avg_exec_time)))

    # Determine the impact of number of writes per transaction on performance.
    # Iterate over all transaction types, data formats, operation types, and
    # operations-per-transaction; always use a list of lists where the inner list has one parameter
    print '\n#######################################'
    # NOTE(review): the banner's title line appears to be missing here
    # (compare with the first section's three-line banner) -- confirm.
    print '#######################################'
    for tx_type in TX_TYPES:
        print '***************************************'
        print 'Transaction Type: %s' % tx_type
        print '***************************************'
        writer.writerow((('%s:' % tx_type), '', ''))

        for fmt in DATA_FORMATS:
            print '---------------------------------------'
            print 'Data format: %s' % fmt
            print '---------------------------------------'
            writer.writerow(('', ('%s:' % fmt), ''))

            for oper in OPERATIONS:
                print 'Operation: %s' % oper
                writer.writerow(('', '', '%s:' % oper))

                # Fixed data layout (TOTAL_ELEMENTS outer, 1 inner); vary the
                # number of operations batched into each transaction.
                for wtx in OPS_PER_TX:
                    avg_build_time, avg_exec_time = \
                        run_test(WARMUP_RUNS, TEST_RUNS, tx_type, oper, fmt, TOTAL_ELEMENTS, 1, wtx)
                    writer.writerow(('', '', '', wtx, avg_build_time, avg_exec_time,
                                     (avg_build_time + avg_exec_time)))

    end_time = time.time()
    print "End time: %f " % end_time
    print "Total execution time: %f" % (end_time - start_time)