4 | 4 | import datetime
5 | 5 | import os
6 | 6 | import sys
  | 7 | +import csv
7 | 8 | from unittest import TestLoader
8 | 9 |
9 | 10 | # Abort if config file is not specified.
...
14 | 15 |     sys.exit(0)
15 | 16 |
16 | 17 | n = 1
  | 18 | +url = None
17 | 19 | parser = configparser.RawConfigParser()
18 | 20 | try:
19 | 21 |     parser.read(config)
...
24 | 26 |     else:
25 | 27 |         print('No test directory specified in your config file. Please do so.')
26 | 28 |         sys.exit(0)
  | 29 | +    if not parser.has_option('dashboard', 'LOG_DIR'):
  | 30 | +        print('No log directory specified in your config file. Please do so.')
  | 31 | +        sys.exit(0)
27 | 32 |     if parser.has_option('dashboard', 'SUBMIT_RESULTS_URL'):
28 | 33 |         url = parser.get('dashboard', 'SUBMIT_RESULTS_URL')
29 | 34 |     else:
30 | 35 |         print('No url specified in your config file for submitting test results. Please do so.')
31 |    | -        sys.exit(0)
32 |    | -except configparser.Error:
33 |    | -    raise
  | 36 | +except configparser.Error as e:
  | 37 | +    print("Something went wrong while parsing the configuration file:\n{}".format(e))
34 | 38 |
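
For reference, every option checked above lives in a single [dashboard] section of the config file. A minimal sketch of a matching INI file; the paths and URL are illustrative placeholders, not values taken from this project:

    [dashboard]
    TEST_DIR = /path/to/project/tests
    LOG_DIR = /path/to/project/logs
    SUBMIT_RESULTS_URL = http://localhost:5000/dashboard/submit-test-results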
35 |    | -data = {'test_runs': []}
  | 39 | +data = {'test_runs': [], 'grouped_tests': []}
  | 40 | +log = open("test_runs.log", "w")
  | 41 | +log.write("\"start_time\",\"stop_time\",\"test_name\"\n")
36 | 42 |
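
A side note on the hand-rolled quoting above: since csv is now imported anyway, the header (and later the data rows) could go through csv.writer, which also escapes any quote characters that turn up inside test names. A minimal sketch of that alternative, not what the diff currently does:

    with open("test_runs.log", "w", newline="") as log:
        writer = csv.writer(log, quoting=csv.QUOTE_ALL)
        # Same header as line 41 writes by hand
        writer.writerow(["start_time", "stop_time", "test_name"])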
37 | 43 | if test_dir:
38 | 44 |     suites = TestLoader().discover(test_dir, pattern="*test*.py")
...
41 | 47 |             for case in suite:
42 | 48 |                 for test in case:
43 | 49 |                     result = None
  | 50 | +                    t1 = str(datetime.datetime.now())
44 | 51 |                     time1 = time.time()
45 | 52 |                     result = test.run(result)
46 | 53 |                     time2 = time.time()
  | 54 | +                    t2 = str(datetime.datetime.now())
  | 55 | +                    log.write("\"{}\",\"{}\",\"{}\"\n".format(t1, t2, str(test)))
47 | 56 |                     t = (time2 - time1) * 1000
48 | 57 |                     data['test_runs'].append({'name': str(test), 'exec_time': t, 'time': str(datetime.datetime.now()),
49 | 58 |                                               'successful': result.wasSuccessful(), 'iter': i + 1})
50 | 59 |
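
A possible refinement, rather than something this diff changes: time.time() follows the wall clock, which can be adjusted mid-run, while time.perf_counter() is monotonic and intended for measuring durations. The timing lines above could read, keeping the millisecond convention:

    time1 = time.perf_counter()
    result = test.run(result)
    time2 = time.perf_counter()
    t = (time2 - time1) * 1000  # elapsed time in milliseconds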
51 |    | -# Try to send test results to the dashboard
52 |    | -try:
53 |    | -    requests.post(url, json=data)
54 |    | -    print('Sent unit test results to the dashboard.')
55 |    | -except:
56 |    | -    print('Sending unit test results to the dashboard failed.')
57 |    | -    raise
  | 60 | +log.close()
  | 61 | +
  | 62 | +# Read and parse the log containing the test runs
  | 63 | +runs = []
  | 64 | +with open('test_runs.log') as log:
  | 65 | +    reader = csv.DictReader(log)
  | 66 | +    for row in reader:
  | 67 | +        runs.append([datetime.datetime.strptime(row["start_time"], "%Y-%m-%d %H:%M:%S.%f"),
  | 68 | +                     datetime.datetime.strptime(row["stop_time"], "%Y-%m-%d %H:%M:%S.%f"),
  | 69 | +                     row['test_name']])
  | 70 | +
  | 71 | +# Read and parse the log containing the endpoint hits
  | 72 | +hits = []
  | 73 | +with open('endpoint_hits.log') as log:
  | 74 | +    reader = csv.DictReader(log)
  | 75 | +    for row in reader:
  | 76 | +        hits.append([datetime.datetime.strptime(row["time"], "%Y-%m-%d %H:%M:%S.%f"),
  | 77 | +                     row['endpoint']])
  | 78 | +
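
Nothing in this diff writes endpoint_hits.log; presumably the dashboard side records one row per monitored request while the tests exercise the app. Judging only by the fields read above, it would have to look something like this (both rows are invented for illustration):

    "time","endpoint"
    "2018-05-13 14:21:08.123456","api.get_user"
    "2018-05-13 14:21:08.654321","api.list_users"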
  | 79 | +# Analyze logs to find out which endpoints are hit by which unit tests
  | 80 | +for h in hits:
  | 81 | +    for r in runs:
  | 82 | +        if r[0] <= h[0] <= r[1]:
  | 83 | +            if {'endpoint': h[1], 'test_name': r[2]} not in data['grouped_tests']:
  | 84 | +                data['grouped_tests'].append({'endpoint': h[1], 'test_name': r[2]})
  | 85 | +            break
  | 86 | +
  | 87 | +# Try to send test results and endpoint-grouped unit tests to the dashboard
  | 88 | +if url:
  | 89 | +    try:
  | 90 | +        requests.post(url, json=data)
  | 91 | +        print('Sent unit test results to the dashboard.')
  | 92 | +    except Exception as e:
  | 93 | +        print('Sending unit test results to the dashboard failed:\n{}'.format(e))
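
To see the matching logic in isolation: a standalone sketch of the containment test from line 82, run on two invented records. A hit is attributed to a test run exactly when its timestamp falls inside that run's [start, stop] window, and the break stops at the first matching run:

    import datetime

    def ts(s):  # small helper for readable literals (illustration only)
        return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S.%f")

    runs = [[ts("2018-05-13 14:21:08.000000"), ts("2018-05-13 14:21:09.000000"), "test_get_user"]]
    hits = [[ts("2018-05-13 14:21:08.500000"), "api.get_user"]]

    grouped = []
    for h in hits:
        for r in runs:
            if r[0] <= h[0] <= r[1]:  # hit happened during this test run
                pair = {'endpoint': h[1], 'test_name': r[2]}
                if pair not in grouped:
                    grouped.append(pair)
                break

    print(grouped)  # [{'endpoint': 'api.get_user', 'test_name': 'test_get_user'}]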