-
Notifications
You must be signed in to change notification settings - Fork 62
/
Copy pathbenchmark2xml.py
executable file
·155 lines (128 loc) · 6.13 KB
/
benchmark2xml.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
#!/usr/bin/env python2.7
"""
Copyright 2018-Present Couchbase, Inc.
Use of this software is governed by the Business Source License included in
the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that
file, in accordance with the Business Source License, use of this software will
be governed by the Apache License, Version 2.0, included in the file
licenses/APL2.txt.
This script takes the JSON output from a Google Benchmark testsuite and
produces an XML version of the same report, which we can use for the CBNT
performance test system.
Usage: python benchmark2xml.py -f some_test.json -o test-detail.xml -t ms -c
"""
import optparse
import sys
import os
import json
import collections
def main():
    """Convert a Google Benchmark JSON report into CBNT-style XML.

    Parses command-line options, loads the benchmark JSON file, groups
    benchmarks into test suites by the leading component of their name
    (split on the separator), and writes one ``<testsuite>`` element per
    group containing a ``<testcase>`` per (benchmark, metric) pair.

    Exits with status -2 on argument errors and -1 on I/O or data errors.
    """
    parser = optparse.OptionParser()
    required_args = optparse.OptionGroup(parser, 'Required Arguments')
    required_args.add_option('-b', '--benchmark_file', action='store',
                             type='string', dest='input_file',
                             help='The input file from Google Benchmark')
    required_args.add_option('-o', '--output_file', action='store',
                             type='string', dest='output_file',
                             help='The file to output the generated XML to')
    required_args.add_option('-c', '--cbnt_metrics', action='store',
                             dest='cbnt_metrics', type='string',
                             help='String that specifies the list of metrics '
                                  'to be tracked, comma separated. That can '
                                  'be any of the metrics measured in a bench '
                                  '(eg, GBench real_time\'/\'cpu_time\', or '
                                  'any user metric.)')
    parser.add_option_group(required_args)

    optional_args = optparse.OptionGroup(parser, 'Optional Arguments')
    optional_args.add_option('-s', '--separator', action='store',
                             dest='separator', type='string', default='/',
                             help='The separator character used in the test '
                                  'name')
    optional_args.add_option('-i', '--in_place', action='store_true',
                             dest='consume', default=False,
                             help='Edit the input file in place. '
                                  '(Note, destroys the original file)')
    optional_args.add_option('-n', '--name', action='store', dest='suite_name',
                             type='string', default="",
                             help='An optional string which gets added to the '
                                  'start of each test suite name. For example'
                                  '"Logger/".')
    parser.add_option_group(optional_args)

    (options, args) = parser.parse_args()

    # Check no arguments were passed to the script,
    # everything is done through options parsing.
    if args:
        print('benchmark2xml does not take any direct arguments')
        parser.print_help()
        sys.exit(-2)

    # Check that all options have a setting, even the optional ones.
    # Optional args should all have default values.
    if any(value is None for value in vars(options).values()):
        print('Some required arguments were not set')
        parser.print_help()
        sys.exit(-2)

    input_path = options.input_file.strip()

    # Load the JSON data from the input file. The context manager
    # guarantees the handle is closed even when json.load raises
    # (the previous version leaked the handle on that path).
    try:
        with open(input_path, 'r') as input_file:
            json_data = json.load(input_file)
    except IOError as e:
        print('Input file does not exist or cannot be opened:\n\t {}'.
              format(e))
        sys.exit(-1)
    except Exception as e:
        print('Failed to load JSON data from input file:\n\t {}'.format(e))
        sys.exit(-1)

    # Google Benchmark emits e.g. "2020-01-01 10:00:00"; CBNT wants the
    # ISO-8601 'T' date/time separator.
    timestamp = json_data['context']['date'].replace(' ', 'T')

    # Normalise the separator once and use it for BOTH split and join.
    # (Previously the split used the stripped separator but the join did
    # not, so names failed to round-trip if -s carried whitespace.)
    separator = options.separator.strip()

    # Group benchmarks by the leading component of their name.
    test_suites = collections.defaultdict(list)
    for bench in json_data['benchmarks']:
        suite_key = bench['name'].split(separator)[0]
        test_suites[suite_key].append(bench)

    # If we are consuming the input file, delete it.
    if options.consume:
        try:
            os.remove(input_path)
        except Exception as e:
            print('Failed to remove the input file:\n\t {}'.format(e))
            sys.exit(-1)

    # Create the output file; if we encounter an error then exit.
    try:
        output_file = open(options.output_file.strip(), 'w')
    except IOError as e:
        print('Output file could not be created:\n\t{}'.format(e))
        sys.exit(-1)

    metrics = options.cbnt_metrics.split(',')
    testcase_string = ' <testcase name="{}" time="%f" classname="{}{}"/>\n'

    # Write the XML data to the output file in the format used within CBNT.
    # The 'with' block ensures the file is closed even on the sys.exit(-1)
    # missing-metric path below.
    with output_file:
        output_file.write('<testsuites timestamp="{}">\n'.format(timestamp))
        for test_suite in test_suites:
            output_file.write(' <testsuite name="{}{}">\n'.
                              format(options.suite_name, test_suite))
            for test in test_suites[test_suite]:
                # Drop the leading suite component; fall back to the full
                # name for benchmarks whose name has no separator at all.
                base_name = separator.join(
                    test['name'].split(separator)[1:])
                if not base_name:
                    base_name = test['name']
                for metric in metrics:
                    if metric not in test:
                        print('Metric \'{}\' not found for test {}'.format(
                            metric, test))
                        sys.exit(-1)
                    test_name = separator.join([base_name, metric])
                    # Note: The sample goes in a field called 'time' in the
                    # xml only because that is the CBNT way of naming things.
                    sample = test[metric]
                    output_file.write(
                        testcase_string.format(test_name, options.suite_name,
                                               test_suite) % sample)
            output_file.write(' </testsuite>\n')
        output_file.write('</testsuites>\n')
# Allow the file to be used both as a script and as an importable module.
if __name__ == '__main__':
    main()