|
import datetime
import functools
import os
import re
import statistics
import time
from os import listdir
from os.path import isfile, join
# File-name suffixes marking the three stages of output files.
FILE_ENDING_RESULT = '.result'  # raw per-call timing samples, appended by the decorator
FILE_ENDING_SUMMARIZED = '.summarized'  # all raw samples joined per function (CSV rows)
FILE_ENDING_ANALYZED = '.analyzed'  # summary statistics per function (CSV rows)
# Directory (relative to the working directory) holding all measurement files.
RESULTS_FOLDER = 'time_measurements'
| 13 | + |
| 14 | + |
def timing(f):
    """Decorator: measure wall-clock duration of each call to *f*.

    Each call appends a line ``module.qualname, millis`` to a per-minute
    ``.result`` file inside ``RESULTS_FOLDER``.

    Returns:
        The wrapped callable; the original return value is passed through.
    """
    @functools.wraps(f)  # preserve __name__/__qualname__/docstring of f
    def wrap(*args, **kwargs):
        # **kwargs added: the original wrapper accepted positional args only,
        # so decorating a function called with keyword arguments raised TypeError.
        start = time.time()
        ret = f(*args, **kwargs)
        end = time.time()

        _write_time_measure_result_to_file(start, end)

        return ret

    def _write_time_measure_result_to_file(start, end):
        # File name buckets results by minute; duration logged in milliseconds.
        date = _get_timestamp(end)
        result = '%s.%s, %0.3f' % (f.__module__, f.__qualname__, (end - start) * 1000.0)
        _append_line_to_file(date + FILE_ENDING_RESULT, result)

    return wrap
| 31 | + |
| 32 | + |
def _append_line_to_file(filename, result):
    """Append *result* plus a newline to ``RESULTS_FOLDER/filename`` (UTF-8)."""
    # Original crashed with FileNotFoundError on the first run because the
    # results directory was never created; create it on demand.
    os.makedirs(RESULTS_FOLDER, exist_ok=True)
    with open(os.path.join(RESULTS_FOLDER, filename), 'a+', encoding='utf8') as file:
        file.write(result + '\n')
| 36 | + |
| 37 | + |
| 38 | +def _get_timestamp(time2): |
| 39 | + return datetime.datetime.fromtimestamp(time2).strftime('%Y-%m-%d_%H:%M') |
| 40 | + |
| 41 | + |
def export_analyzed_results_as_csv():
    """Write per-function summary statistics to a ``.analyzed`` CSV file.

    Rows are ordered by mean duration, slowest first. Columns:
    function name, max, min, mean, median, stdev, variance (all in ms).
    """
    results_map = _get_results_from_files()

    date = _get_timestamp(time.time())
    out_name = date + FILE_ENDING_ANALYZED
    _append_line_to_file(out_name, 'func, MAX, MIN, AVG, MED, STD, VAR')

    for key in sorted(results_map, key=lambda k: statistics.mean(results_map[k]), reverse=True):
        values = results_map[key]
        # stdev/variance need at least 2 samples; the original raised
        # StatisticsError for any function timed exactly once. Report 0.0.
        std = statistics.stdev(values) if len(values) > 1 else 0.0
        var = statistics.variance(values) if len(values) > 1 else 0.0
        fields = [key, max(values), min(values),
                  statistics.mean(values), statistics.median(values), std, var]
        _append_line_to_file(out_name, ','.join(str(v) for v in fields))
| 58 | + |
| 59 | + |
def export_results_as_csv():
    """Dump every raw timing sample to a ``.summarized`` CSV file.

    One row per function — the name followed by all of its samples —
    ordered by mean duration, slowest first.
    """
    results_map = _get_results_from_files()
    date = _get_timestamp(time.time())

    by_mean_desc = sorted(results_map,
                          key=lambda name: statistics.mean(results_map[name]),
                          reverse=True)
    for name in by_mean_desc:
        row = ','.join([name] + [str(sample) for sample in results_map[name]])
        _append_line_to_file(date + FILE_ENDING_SUMMARIZED, row)
| 68 | + |
| 69 | + |
def _get_results_from_files():
    """Collect all timing samples from every ``.result`` file.

    Returns:
        dict mapping function name -> list of durations (ms, as floats).
    """
    results_map = {}
    # endswith() expresses the intent directly; the old regex
    # (r'.*' + '.result', unanchored, dot unescaped) also matched names
    # like 'old_resultx' or 'foo.resultbak'.
    result_files = [f for f in listdir(RESULTS_FOLDER)
                    if isfile(join(RESULTS_FOLDER, f)) and f.endswith(FILE_ENDING_RESULT)]

    for f in result_files:
        _read_results_from_file_to_map(f, results_map)

    return results_map
| 79 | + |
| 80 | + |
def _read_results_from_file_to_map(f, results_map):
    """Parse every line of ``RESULTS_FOLDER/f`` into *results_map* in place."""
    # 'with' closes the handle — the original leaked the file object — and
    # the encoding matches the utf8 the writer (_append_line_to_file) uses.
    with open(os.path.join(RESULTS_FOLDER, f), encoding='utf8') as file:
        for line in file:
            _put_result_line_to_map(line, results_map)
| 86 | + |
| 87 | + |
| 88 | +def _put_result_line_to_map(r, results_map): |
| 89 | + function_name, function_time = r.split(',') |
| 90 | + if function_name not in results_map.keys(): |
| 91 | + results_map[function_name] = [] |
| 92 | + results_map[function_name].append(float(function_time)) |
0 commit comments