@@ -1,21 +1,12 @@
 import math
 import json
 import pycurl
-import sys
 
-from . import tests
-from .tests import Test
-from . import parsing
-from .parsing import *
+from constants import DEFAULT_TIMEOUT
+from tests import Test, coerce_to_string
+from parsing import *
 
-# Python 2/3 switches
-if sys.version_info[0] > 2:
-    from past.builtins import basestring
 
-# Python 3 compatibility shims
-from . import six
-from .six import binary_type
-from .six import text_type
 
 """
 Encapsulates logic related to benchmarking
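The import changes above finish the Python 3 migration: the `six` and `past.builtins` shims existed only to paper over the Python 2 `str`/`unicode` split, and once `basestring` is gone every string check in this file collapses to a plain `isinstance(..., str)`. A minimal standalone sketch of the pattern (not part of the diff):

```python
# On Python 3, str is the single text type, so the old
# isinstance(value, basestring) guard becomes:
value = u"connect_time"
assert isinstance(value, str)           # accepts all text
assert not isinstance(b"raw", str)      # bytes are intentionally rejected
```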
@@ -109,10 +100,7 @@ def std_deviation(array):
 
     average = AGGREGATES['mean_arithmetic'](array)
     variance = map(lambda x: (x - average)**2, array)
-    try:
-        len(variance)
-    except TypeError:  # Python 3.3 workaround until can use the statistics module from 3.4
-        variance = list(variance)
+    variance = list(variance)
    stdev = AGGREGATES['mean_arithmetic'](variance)
     return math.sqrt(stdev)
 
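The removed try/except probed `len()` to detect whether `map()` had returned a lazy iterator; on Python 3 it always does, so the new code simply materializes it with `list()` before averaging. The function computes the population standard deviation, the square root of the mean squared deviation. A standalone sketch (plain arithmetic stands in for the module's `AGGREGATES` table), checked against `statistics.pstdev`, the stdlib function the removed comment alluded to:

```python
import math
import statistics

def std_deviation(array):
    # Population standard deviation: sqrt(mean((x - mean)^2)).
    if not array or len(array) == 1:
        return 0
    average = sum(array) / len(array)
    # map() is lazy on Python 3, so build a concrete list before re-averaging
    variance = [(x - average) ** 2 for x in array]
    return math.sqrt(sum(variance) / len(variance))

samples = [12.0, 14.0, 15.0, 11.0]
assert math.isclose(std_deviation(samples), statistics.pstdev(samples))
```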
@@ -193,13 +181,9 @@ def realize_partial(self, context=None):
             # Enhancement - once extract is done, check if variables already bound,
             # in that case template out
             return self
-        else:
-            copyout = copy.cop
-
-            pass
 
 
-    def configure_curl(self, timeout=tests.DEFAULT_TIMEOUT, context=None, curl_handle=None):
+    def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None):
         curl = super().configure_curl(timeout=timeout,
                                       context=context, curl_handle=curl_handle)
         # Simulate results from different users hitting server
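The body of `configure_curl` continues past this hunk, so only the trailing comment hints at what happens next: each benchmark run should behave like a brand-new client rather than a browser reusing a warm connection. A plausible sketch of that setup, using real pycurl options but written as a hypothetical helper; the specific option choices are an assumption, not shown in this diff:

```python
import pycurl

def simulate_fresh_user(curl):
    # Hypothetical helper: force every benchmark iteration onto a new
    # TCP connection so keep-alive reuse does not flatter the timings.
    curl.setopt(pycurl.FORBID_REUSE, 1)   # close the connection after the run
    curl.setopt(pycurl.FRESH_CONNECT, 1)  # never reuse a cached connection
    return curl
```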
@@ -229,42 +213,42 @@ def parse_benchmark(base_url, node):
             else:
                 raise ValueError('Invalid benchmark output format: ' + format)
         elif key == u'output_file':
-            if not isinstance(value, basestring):
+            if not isinstance(value, str):
                 raise ValueError("Invalid output file format")
             benchmark.output_file = value
         elif key == u'metrics':
-            if isinstance(value, basestring):
+            if isinstance(value, str):
                 # Single value
-                benchmark.add_metric(tests.coerce_to_string(value))
+                benchmark.add_metric(coerce_to_string(value))
             # FIXME refactor the parsing of metrics here, lots of duplicated logic
             elif isinstance(value, list) or isinstance(value, set):
                 # List of single values or list of {metric:aggregate, ...}
                 for metric in value:
                     if isinstance(metric, dict):
                         for metricname, aggregate in metric.items():
-                            if not isinstance(metricname, basestring):
+                            if not isinstance(metricname, str):
                                 raise TypeError(
                                     "Invalid metric input: non-string metric name")
-                            if not isinstance(aggregate, basestring):
+                            if not isinstance(aggregate, str):
                                 raise TypeError(
                                     "Invalid aggregate input: non-string aggregate name")
                             # TODO unicode-safe this
-                            benchmark.add_metric(tests.coerce_to_string(metricname),
-                                                 tests.coerce_to_string(aggregate))
+                            benchmark.add_metric(coerce_to_string(metricname),
+                                                 coerce_to_string(aggregate))
 
-                    elif isinstance(metric, basestring):
-                        benchmark.add_metric(tests.coerce_to_string(metric))
+                    elif isinstance(metric, str):
+                        benchmark.add_metric(coerce_to_string(metric))
             elif isinstance(value, dict):
                 # Dictionary of metric-aggregate pairs
                 for metricname, aggregate in value.items():
-                    if not isinstance(metricname, basestring):
+                    if not isinstance(metricname, str):
                         raise TypeError(
                             "Invalid metric input: non-string metric name")
-                    if not isinstance(aggregate, basestring):
+                    if not isinstance(aggregate, str):
                         raise TypeError(
                             "Invalid aggregate input: non-string aggregate name")
-                    benchmark.add_metric(tests.coerce_to_string(metricname),
-                                         tests.coerce_to_string(aggregate))
+                    benchmark.add_metric(coerce_to_string(metricname),
+                                         coerce_to_string(aggregate))
             else:
                 raise TypeError(
                     "Invalid benchmark metric datatype: " + str(value))
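For reference, the three `metrics` shapes this parser accepts, as they would arrive in the deserialized `node` dict. The metric and aggregate names below are illustrative assumptions, apart from `mean_arithmetic` and `std_deviation`, which appear elsewhere in this file:

```python
# 1. Single metric name
node_a = {'metrics': 'total_time'}

# 2. List mixing bare names and {metric: aggregate} pairs
node_b = {'metrics': ['total_time',
                      {'connect_time': 'mean_arithmetic'}]}

# 3. Dict of metric-to-aggregate pairs
node_c = {'metrics': {'total_time': 'mean_arithmetic',
                      'connect_time': 'std_deviation'}}
```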