#!/usr/bin/env python3
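"""Drive mongo-perf micro-benchmark JS test files through the mongo shell.

A minimal usage sketch (the test file path is hypothetical; substitute your
own benchmark .js files):

    python benchrun.py -f testcases/simple_insert.js -t 1 2 4 --trialTime 5

Run with --help for full argument descriptions.
"""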
import json
import os
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from subprocess import PIPE, Popen, check_call
from tempfile import NamedTemporaryFile
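
# Note: tabulate is a third-party dependency (pip install tabulate).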
from tabulate import tabulate


class MongoShellCommandError(Exception):
    """Raised when the mongo shell comes back with an unexpected error."""


def parse_arguments():
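    """Build the ArgumentParser describing benchrun's command-line options."""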
    usage = ("python benchrun.py -f <list of test files> -t <list of thread counts>\n"
             "run with --help for argument descriptions")
    parser = ArgumentParser(description="mongo-perf micro-benchmark utility",
                            usage=usage, formatter_class=RawTextHelpFormatter)
    parser.add_argument('-f', '--testfiles', dest='testfiles', nargs="+",
                        help='Provide a list of js test files to run',
                        default=None)
    parser.add_argument('-t', '--threads', dest='threads', nargs="+",
                        help='Specify which thread configurations to use',
                        type=int, default=[1, 2, 4, 8, 12, 16])
    parser.add_argument('-m', '--multidb', dest='multidb',
                        help='Specify how many databases the test should use',
                        type=int, default=1)
    parser.add_argument('-c', '--multicoll', dest='multicoll',
                        help='Specify how many collections the test should use',
                        type=int, default=1)
    parser.add_argument('--trialTime', dest='seconds',
                        help='Specify how many seconds to run each trial',
                        type=int, default=5)
    parser.add_argument('--trialCount', dest='trials',
                        help='Specify how many trials to run',
                        type=int, default=1)
    parser.add_argument('--host', dest='hostname',
                        help='Hostname of the mongod/mongos under test',
                        default='localhost')
    parser.add_argument('--port', dest='port',
                        help='Port of the mongod/mongos under test',
                        default='27017')
    parser.add_argument('--replset', dest='replica_set',
                        help='Replica set name of the mongod/mongos under test',
                        default=None)
    parser.add_argument('-u', '--username', dest='username',
                        help='Username to use for MongoDB authentication',
                        default=None)
    parser.add_argument('-p', '--password', dest='password',
                        help='Password to use for MongoDB authentication',
                        default=None)
    parser.add_argument('--shard', dest='shard',
                        help='Specify the shard configuration the test should use:\n'
                             '0 - no sharding, 1 - shard with {_id: hashed}, 2 - shard with {_id: 1}',
                        type=int, default=0, choices=[0, 1, 2])
    parser.add_argument('-s', '--shell', dest='shellpath',
                        help='Path to the mongo shell executable to use.',
                        default='mongo')
    parser.add_argument('-w', dest='w',
                        help='Specify the w write concern value',
                        type=int, default=0)
    parser.add_argument('-j', dest='j',
                        nargs='?', const='true', choices=['true', 'false'],
                        help='Enable the j write concern',
                        default='false')
    parser.add_argument('--writeCmd', dest='writeCmd',
                        nargs='?', const='true', choices=['true', 'false'],
                        help='Use write commands instead of legacy write operations',
                        default='true')
    parser.add_argument('--readCmd', dest='readCmd',
                        nargs='?', const='true', choices=['true', 'false'],
                        help='Use read commands instead of legacy read operations',
                        default='false')
    parser.add_argument('--includeFilter', dest='includeFilter', nargs='+', action="append",
                        help="Run just the specified tests/suites. Multiple tags can be given per\n"
                             "--includeFilter flag; all tests/suites matching any of those tags are run.\n"
                             "Multiple --includeFilter flags can be given on the command line; a test\n"
                             "must match all of the --includeFilter clauses in order to be run.\n\n"
                             "Ex 1: --includeFilter insert remove --includeFilter core\n"
                             "      will run all tests tagged with (\"insert\" OR \"remove\") AND (\"core\").\n"
                             "Ex 2: --includeFilter %%\n"
                             "      will run all tests",
                        default=[])
    parser.add_argument('--excludeFilter', dest='excludeFilter', nargs='+', action="append",
                        help="Exclude tests matching all of the tags given. Multiple --excludeFilter\n"
                             "flags can be given on the command line; a test matching any --excludeFilter\n"
                             "clause will not be run. A test that is both included by --includeFilter\n"
                             "and excluded by --excludeFilter will not be run.\n\n"
                             "Ex: --excludeFilter slow old --excludeFilter broken\n"
                             "    will exclude all tests tagged with (\"slow\" AND \"old\") OR (\"broken\").",
                        default=[])
    parser.add_argument('--out', dest='outfile',
                        help='Write the results as JSON to the specified file')
    parser.add_argument('--exclude-testbed', dest='excludeTestbed', nargs='?', const='true',
                        choices=['true', 'false'], default='false',
                        help='Exclude testbed information from the results file')
    parser.add_argument('--printArgs', dest='printArgs', nargs='?', const='true',
                        choices=['true', 'false'], default='false',
                        help='Print the benchrun args before running the test.')
    parser.add_argument('--generateMongoeBenchConfigFiles', dest='mongoebench_config_dir',
                        help='Changes the behavior of this script to write JSON config files\n'
                             'equivalent to the operations performed in the list of specified JS test\n'
                             'files without actually running the test cases. A mongod process must\n'
                             'still be running while the JSON config files are being generated.')
    parser.add_argument('--shareDataset', dest='shareDataset', nargs='?', const='true',
                        choices=['true', 'false'], default='false',
                        help='Share the dataset created by the first test with all following tests/trials.')
    parser.add_argument('--variantName', dest='variantName', nargs="?",
                        help='The variant name defined in mongod',
                        type=str, default=None)
    parser.add_argument('--variants', dest='variants', nargs="+",
                        help='Compare perf for different variants',
                        type=int, default=[])
    parser.add_argument('--tsvSummary', dest='tsvSummary',
                        action='store_true', default=False,
                        help='Print a TSV format summary at the end')
    return parser


def print_summary(results_parsed):
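    """Print a per-test, per-thread-count table of the parsed results."""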
    table = []
    for result in results_parsed["results"]:
        name = result["name"]
        variant = result.get("variant", "")
        for thread, values in result["results"].items():
            # Per-thread entries are dicts of stats; skip any scalar fields.
            if isinstance(values, dict):
                table.append([name, variant, thread, values['ops_per_sec'],
                              values['ops_per_sec_median'], len(values['ops_per_sec_values']),
                              values['ops_per_sec_stdev']])
    print(tabulate(table, headers=["name", "variant", "thread_count", "ops_per_sec(mean)",
                                   "ops_per_sec(median)", "count", "stdev"], floatfmt=".4f"))


def main():
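    """Validate arguments, generate the benchmark JS, run it, and report results."""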
    parser = parse_arguments()
    args = parser.parse_args()

    if not args.testfiles:
        print("Must provide at least one test file."
              " Run with --help for details.")
        sys.exit(1)

    for testfile in args.testfiles:
        if not os.path.exists(testfile):
            print("A test file that was passed in does not exist: %s" % testfile)
            sys.exit(1)

    if args.multidb < 1:
        print("MultiDB option must be greater than zero. Will be set to 1.")
        args.multidb = 1

    if args.multicoll < 1:
        print("MultiCollection option must be greater than zero."
              " Will be set to 1.")
        args.multicoll = 1

    if args.shard < 0:
        print("Shard option must be in [0, 2]. Will be set to 0.")
        args.shard = 0
    elif args.shard > 2:
        print("Shard option must be in [0, 2]. Will be set to 2.")
        args.shard = 2
    auth = []
    using_auth = False
    if isinstance(args.username, str) and isinstance(args.password, str):
        auth = ["-u", args.username, "-p", args.password, "--authenticationDatabase", "admin"]
        using_auth = True
    elif isinstance(args.username, str) or isinstance(args.password, str):
        print("Warning: You specified one of username or password, but not the other.")
        print("         Benchrun will continue without authentication.")

    # Normalize the include filter: no --includeFilter flags means "run all
    # tests" ('%'), and a single clause is flattened to its list of tags.
    if args.includeFilter == []:
        args.includeFilter = '%'
    elif len(args.includeFilter) == 1:
        args.includeFilter = args.includeFilter[0]
    if args.includeFilter == ['%']:
        args.includeFilter = '%'
    if args.variantName is not None and not args.variants:
        print("Variant numbers are not specified.")
        sys.exit(1)

    # Smoke-test connectivity and report the server version before running.
    check_call([args.shellpath, "--norc",
                "--host", args.hostname, "--port", args.port,
                "--eval", "print('db version: ' + db.version());"
                          " db.serverBuildInfo().gitVersion;"] + auth)
    print("")
    commands = []

    # load test files
    for testfile in ['util/utils.js', 'util/docGenerators.js'] + args.testfiles:
        if not os.path.exists(testfile):
            raise MongoShellCommandError("test file %s doesn't exist" % testfile)
        commands.append("load('%s');" % testfile)
    # Put all CRUD options in a map.
    crud_options = {}
    crud_options["writeConcern"] = {}
    # args.j holds the string 'true' or 'false', so compare explicitly: a bare
    # truthiness check would also treat 'false' as enabled.
    if args.j == 'true':
        crud_options["writeConcern"]["j"] = args.j
    if args.w:
        crud_options["writeConcern"]["w"] = args.w
    crud_options["writeCmdMode"] = args.writeCmd
    crud_options["readCmdMode"] = args.readCmd
    mongoebench_options = {"traceOnly": False}
    if args.mongoebench_config_dir is not None:
        mongoebench_options["directory"] = os.path.abspath(args.mongoebench_config_dir)
        mongoebench_options["traceOnly"] = True
        try:
            os.makedirs(args.mongoebench_config_dir)
        except OSError:
            # The directory already exists.
            pass
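    # In this mode the JS harness records each test's operations as JSON config
    # files in that directory instead of timing them (see
    # --generateMongoeBenchConfigFiles above).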
    authstr = ""
    if using_auth:
        authstr = ", '" + args.username + "', '" + args.password + "'"

    variant_name_str = "null"
    if args.variantName:
        variant_name_str = "'" + args.variantName + "'"
    # Assemble the mongoPerfRunTests(...) call. Python's list repr for threads
    # and variants is valid JS, and json.dumps covers the remaining arguments.
    commands.append("mongoPerfRunTests(" +
                    str(args.threads) + ", " +
                    str(args.multidb) + ", " +
                    str(args.multicoll) + ", " +
                    str(args.seconds) + ", " +
                    str(args.trials) + ", " +
                    json.dumps(args.includeFilter) + ", " +
                    json.dumps(args.excludeFilter) + ", " +
                    str(args.shard) + ", " +
                    json.dumps(crud_options) + ", " +
                    args.excludeTestbed + ", " +
                    args.printArgs + ", " +
                    args.shareDataset + ", " +
                    variant_name_str + ", " +
                    str(args.variants) + ", " +
                    json.dumps(mongoebench_options) +
                    authstr +
                    ");")
    commands = '\n'.join(commands)
    print(commands)

    with NamedTemporaryFile('w', suffix='.js') as js_file:
        js_file.write(commands)
        js_file.flush()

        # Open a mongo shell subprocess and run the generated script.
        mongo_proc = Popen([args.shellpath, "--norc", "--quiet", js_file.name,
                            "--host", args.hostname, "--port", args.port] + auth,
                           stdout=PIPE, text=True)
        # Read the shell's output. The JS harness brackets human-readable
        # progress output with @@@START@@@/@@@END@@@ and the JSON results with
        # @@@RESULTS_START@@@/@@@RESULTS_END@@@.
        readout = False
        getting_results = False
        got_results = False
        line_results = ""
        for line in iter(mongo_proc.stdout.readline, ''):
            line = line.strip()
            if line == "@@@START@@@":
                readout = True
                getting_results = False
            elif line == "@@@END@@@":
                readout = False
                getting_results = False
            elif line == "@@@RESULTS_START@@@":
                readout = False
                getting_results = True
            elif line == "@@@RESULTS_END@@@":
                readout = False
                got_results = True
                getting_results = False
            elif readout:
                print(line)
            elif not got_results and getting_results:
                line_results += line
print("Finished Testing.")
results_parsed = json.loads(line_results)
if args.outfile:
out = open(args.outfile, 'w')
json.dump(results_parsed, out, indent=4, separators=(',', ': '))
out.close()
else:
print(json.dumps(results_parsed, indent=4, separators=(',', ': ')))
if args.tsvSummary:
print_summary(results_parsed)


if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        sys.stderr.write('Error: %s\n' % e)
        sys.exit(1)
    sys.exit(0)