Commit dac31e5

Merge branch 'development'
2 parents: 13c343c + ec46959

23 files changed: +256 -101 lines

README.md (+8 -8)
@@ -49,14 +49,15 @@ The following things can be configured:
 GUEST_USERNAME=guest
 GUEST_PASSWORD=['dashboardguest!', 'second_pw!']
 DATABASE=sqlite:////<path to your project>/dashboard.db
-GIT=/<path to your project>/dashboard/.git/
-TEST_DIR=/<path to your project>/dashboard/tests/
+GIT=/<path to your project>/.git/
+TEST_DIR=/<path to your project>/tests/
+LOG_DIR=/<path to your project>/
 N=5
 SUBMIT_RESULTS_URL=http://0.0.0.0:5000/dashboard/submit-test-results
 OUTLIER_DETECTION_CONSTANT=2.5
 COLORS={'main':[0,97,255], 'static':[255,153,0]}
 
-For more information: [see this file](dashboard/config.py)
+For more information, please refer to [this file](dashboard/config.py)
 
 When running your app, the dashboard can be viewed by default in the route:
 
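For reference, the full configuration block after this change reads as follows. This is a sketch assembled from the sample above, assuming the '[dashboard]' section header that config.py reads its options from; every path is a placeholder for your own project:

    [dashboard]
    GUEST_USERNAME=guest
    GUEST_PASSWORD=['dashboardguest!', 'second_pw!']
    DATABASE=sqlite:////<path to your project>/dashboard.db
    GIT=/<path to your project>/.git/
    TEST_DIR=/<path to your project>/tests/
    LOG_DIR=/<path to your project>/
    N=5
    SUBMIT_RESULTS_URL=http://0.0.0.0:5000/dashboard/submit-test-results
    OUTLIER_DETECTION_CONSTANT=2.5
    COLORS={'main':[0,97,255], 'static':[255,153,0]}
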
@@ -68,13 +69,12 @@ To enable Travis to run your unit tests and send the results to the dashboard, f
 
 First off, the file 'collect_performance.py' (which comes with the dashboard) should be copied to the directory where your '.travis.yml' file resides.
 
-Secondly, your config file for the dashboard ('config.cfg') should be updated to include three additional values, TEST_DIR, SUBMIT_RESULTS_URL and N.
-The first specifies where your unit tests reside, the second where Travis should upload the test results to, and the third specifies the number of times Travis should run each unit test.
-See the sample config file in the section above for more details.
+Secondly, your config file for the dashboard ('config.cfg') should be updated to include four additional values, TEST_DIR, LOG_DIR, SUBMIT_RESULTS_URL and N.
+The first specifies where your unit tests reside, the second where the logs should be placed, the third where Travis should upload the test results to, and the last specifies the number of times Travis should run each unit test.
+See the sample config file in the section above for an example.
 
-Then, a dependency link to the dashboard has to be added to the 'setup.py' file of your app:
+Then, the installation requirement for the dashboard has to be added to the 'setup.py' file of your app:
 
-dependency_links=["git+https://github.com/mircealungu/automatic-monitoring-dashboard.git#egg=flask_monitoring_dashboard"],
 install_requires=('flask_monitoring_dashboard')
 
 Lastly, in your '.travis.yml' file, two script commands should be added:

dashboard/collect_performance.py (+47 -11)
@@ -4,6 +4,7 @@
 import datetime
 import os
 import sys
+import csv
 from unittest import TestLoader
 
 # Abort if config file is not specified.
@@ -14,6 +15,7 @@
     sys.exit(0)
 
 n = 1
+url = None
 parser = configparser.RawConfigParser()
 try:
     parser.read(config)
@@ -24,15 +26,19 @@
     else:
         print('No test directory specified in your config file. Please do so.')
         sys.exit(0)
+    if not parser.has_option('dashboard', 'LOG_DIR'):
+        print('No log directory specified in your config file. Please do so.')
+        sys.exit(0)
     if parser.has_option('dashboard', 'SUBMIT_RESULTS_URL'):
        url = parser.get('dashboard', 'SUBMIT_RESULTS_URL')
     else:
         print('No url specified in your config file for submitting test results. Please do so.')
-        sys.exit(0)
-except configparser.Error:
-    raise
+except configparser.Error as e:
+    print("Something went wrong while parsing the configuration file:\n{}".format(e))
 
-data = {'test_runs': []}
+data = {'test_runs': [], 'grouped_tests': []}
+log = open("test_runs.log", "w")
+log.write("\"start_time\",\"stop_time\",\"test_name\"\n")
 
 if test_dir:
     suites = TestLoader().discover(test_dir, pattern="*test*.py")
@@ -41,17 +47,47 @@
         for case in suite:
             for test in case:
                 result = None
+                t1 = str(datetime.datetime.now())
                 time1 = time.time()
                 result = test.run(result)
                 time2 = time.time()
+                t2 = str(datetime.datetime.now())
+                log.write("\"{}\",\"{}\",\"{}\"\n".format(t1, t2, str(test)))
                 t = (time2 - time1) * 1000
                 data['test_runs'].append({'name': str(test), 'exec_time': t, 'time': str(datetime.datetime.now()),
                                           'successful': result.wasSuccessful(), 'iter': i + 1})
 
-# Try to send test results to the dashboard
-try:
-    requests.post(url, json=data)
-    print('Sent unit test results to the dashboard.')
-except:
-    print('Sending unit test results to the dashboard failed.')
-    raise
+log.close()
+
+# Read and parse the log containing the test runs
+runs = []
+with open('test_runs.log') as log:
+    reader = csv.DictReader(log)
+    for row in reader:
+        runs.append([datetime.datetime.strptime(row["start_time"], "%Y-%m-%d %H:%M:%S.%f"),
+                     datetime.datetime.strptime(row["stop_time"], "%Y-%m-%d %H:%M:%S.%f"),
+                     row['test_name']])
+
+# Read and parse the log containing the endpoint hits
+hits = []
+with open('endpoint_hits.log') as log:
+    reader = csv.DictReader(log)
+    for row in reader:
+        hits.append([datetime.datetime.strptime(row["time"], "%Y-%m-%d %H:%M:%S.%f"),
+                     row['endpoint']])
+
+# Analyze logs to find out which endpoints are hit by which unit tests
+for h in hits:
+    for r in runs:
+        if r[0] <= h[0] <= r[1]:
+            if {'endpoint': h[1], 'test_name': r[2]} not in data['grouped_tests']:
+                data['grouped_tests'].append({'endpoint': h[1], 'test_name': r[2]})
+            break
+
+# Try to send test results and endpoint-grouped unit tests to the dashboard
+if url:
+    try:
+        requests.post(url, json=data)
+        print('Sent unit test results to the dashboard.')
+    except Exception as e:
+        print('Sending unit test results to the dashboard failed:\n{}'.format(e))
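
The grouping logic added above is a plain interval check: an endpoint hit is attributed to a unit test when the hit's timestamp falls between that test's start and stop times. A minimal standalone sketch of that step, using hypothetical log rows rather than data from this commit:

    import datetime

    FMT = "%Y-%m-%d %H:%M:%S.%f"

    # Hypothetical parsed rows: one test run and two endpoint hits.
    runs = [(datetime.datetime.strptime("2017-06-01 12:00:00.000000", FMT),
             datetime.datetime.strptime("2017-06-01 12:00:01.000000", FMT),
             "test_login (test_auth.AuthTest)")]
    hits = [(datetime.datetime.strptime("2017-06-01 12:00:00.500000", FMT), "login"),
            (datetime.datetime.strptime("2017-06-01 12:00:05.000000", FMT), "index")]

    grouped = []
    for hit_time, endpoint in hits:
        for start, stop, test_name in runs:
            if start <= hit_time <= stop:  # the hit happened during this test run
                pair = {'endpoint': endpoint, 'test_name': test_name}
                if pair not in grouped:
                    grouped.append(pair)
                break

    # Only the 'login' hit falls inside the run's interval:
    print(grouped)  # [{'endpoint': 'login', 'test_name': 'test_login (test_auth.AuthTest)'}]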

dashboard/colors.py (-1)
@@ -14,4 +14,3 @@ def get_color(hash):
     else:
         rgb = ColorHash(hash).rgb
     return 'rgb({0}, {1}, {2})'.format(rgb[0], rgb[1], rgb[2])
-
dashboard/config.py (+12 -1)
@@ -23,6 +23,7 @@ def __init__(self):
         self.guest_password = ['guest_password']
         self.outlier_detection_constant = 2.5
         self.colors = {}
+        self.log_dir = None
 
         # define a custom function to retrieve the session_id or username
         self.get_group_by = None
@@ -56,6 +57,10 @@ def from_file(self, config_file):
         :param config_file: a string pointing to the location of the config-file
         """
 
+        config = os.getenv('DASHBOARD_CONFIG')
+        if config:
+            config_file = config
+
         parser = configparser.RawConfigParser()
         try:
             parser.read(config_file)
@@ -67,6 +72,11 @@ def from_file(self, config_file):
             self.database_name = parser.get('dashboard', 'DATABASE')
             if parser.has_option('dashboard', 'TEST_DIR'):
                 self.test_dir = parser.get('dashboard', 'TEST_DIR')
+            if parser.has_option('dashboard', 'LOG_DIR'):
+                self.log_dir = parser.get('dashboard', 'LOG_DIR')
+                log = open(self.log_dir + "endpoint_hits.log", "w")
+                log.write("\"time\",\"endpoint\"\n")
+                log.close()
 
             # For manually defining colors of specific endpoints
             if parser.has_option('dashboard', 'COLORS'):
@@ -101,6 +111,7 @@ def from_file(self, config_file):
 
             # when an outlier detection constant has been set up:
             if parser.has_option('dashboard', 'OUTLIER_DETECTION_CONSTANT'):
-                self.outlier_detection_constant = parser.get('dashboard', 'OUTLIER_DETECTION_CONSTANT')
+                self.outlier_detection_constant = ast.literal_eval(
+                    parser.get('dashboard', 'OUTLIER_DETECTION_CONSTANT'))
         except configparser.Error:
             raise
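
The switch from a bare parser.get() to ast.literal_eval() matters because configparser always returns option values as strings; literal_eval safely evaluates a string containing a Python literal, and nothing else. A quick illustration:

    import ast

    print(type(ast.literal_eval('2.5')))            # <class 'float'>, not str
    print(ast.literal_eval("{'main':[0,97,255]}"))  # {'main': [0, 97, 255]}

    try:
        ast.literal_eval('__import__("os")')        # not a literal: rejected
    except ValueError as e:
        print('refused:', e)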

dashboard/database/__init__.py (+9)
@@ -102,6 +102,15 @@ class Outlier(Base):
     time = Column(DateTime)
 
 
+class TestsGrouped(Base):
+    """ Table for storing grouped tests on endpoints. """
+    __tablename__ = 'testsGrouped'
+    # Name of the endpoint
+    endpoint = Column(String(250), primary_key=True)
+    # Name of the unit test
+    test_name = Column(String(250), primary_key=True)
+
+
 # define the database
 engine = create_engine(config.database_name)
 
dashboard/database/endpoint.py (+2 -1)
@@ -86,7 +86,8 @@ def get_monitor_rule(endpoint):
             return result
     except NoResultFound:
         with session_scope() as db_session:
-            db_session.add(MonitorRule(endpoint=endpoint, version_added=config.version, time_added=datetime.datetime.now()))
+            db_session.add(
+                MonitorRule(endpoint=endpoint, version_added=config.version, time_added=datetime.datetime.now()))
 
     # return new added row
     return get_monitor_rule(endpoint)

dashboard/database/tests_grouped.py (new file, +25)
@@ -0,0 +1,25 @@
+"""
+Contains all functions that operate on the testsGrouped table
+"""
+from dashboard.database import session_scope, TestsGrouped
+
+
+def reset_tests_grouped():
+    """ Resets the testsGrouped table of the database. """
+    with session_scope() as db_session:
+        db_session.query(TestsGrouped).delete()
+
+
+def add_tests_grouped(json):
+    """ Adds endpoint - unit tests combinations to the database. """
+    with session_scope() as db_session:
+        for combination in json:
+            db_session.add(TestsGrouped(endpoint=combination['endpoint'], test_name=combination['test_name']))
+
+
+def get_tests_grouped():
+    """ Return all existing endpoint - unit tests combinations. """
+    with session_scope() as db_session:
+        result = db_session.query(TestsGrouped).all()
+        db_session.expunge_all()
+        return result
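
A short usage sketch of the three helpers above; the endpoint and test names here are hypothetical, and the list passed to add_tests_grouped mirrors the 'grouped_tests' payload that export_data.py receives (see below):

    from dashboard.database.tests_grouped import (
        reset_tests_grouped, add_tests_grouped, get_tests_grouped)

    # Replace the stored combinations with a fresh set, then read them back.
    reset_tests_grouped()
    add_tests_grouped([{'endpoint': 'login', 'test_name': 'test_login'},
                       {'endpoint': 'login', 'test_name': 'test_bad_password'}])
    for row in get_tests_grouped():
        print(row.endpoint, '->', row.test_name)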

dashboard/forms.py (+1 -1; whitespace-only, likely adding a newline at end of file)
@@ -25,4 +25,4 @@ class ChangeSetting(FlaskForm):
 
 class RunTests(FlaskForm):
     """ Used for serving a login form on /{{ link }}/testmonitor. """
-    submit = SubmitField('Run selected tests')
+    submit = SubmitField('Run selected tests')

dashboard/main.py (+1)
@@ -11,6 +11,7 @@ def get_session_id():
     # implement here your own custom function
     return '12345'
 
+
 dashboard.config.get_group_by = get_session_id
 dashboard.bind(app=user_app)
dashboard/measurement.py (+12 -1)
@@ -44,6 +44,7 @@ def track_performance(func, endpoint):
     :param func: the function to be measured
     :param endpoint: the name of the endpoint
     """
+
     @wraps(func)
     def wrapper(*args, **kwargs):
         # compute average
@@ -54,12 +55,19 @@ def wrapper(*args, **kwargs):
         # start a thread to log the stacktrace after 'average' ms
         stack_info = StackInfo(average)
 
+        t1 = str(datetime.datetime.now())
         time1 = time.time()
         result = func(*args, **kwargs)
         time2 = time.time()
-        t = (time2-time1)*1000
+        t = (time2 - time1) * 1000
         add_function_call(time=t, endpoint=endpoint)
 
+        # Logging for grouping unit test results by endpoint
+        if config.log_dir:
+            log = open(config.log_dir + "endpoint_hits.log", "a")
+            log.write("\"{}\",\"{}\"\n".format(t1, endpoint))
+            log.close()
+
         # outlier detection
         endpoint_count[endpoint] += 1
         endpoint_sum[endpoint] += t
@@ -68,6 +76,7 @@ def wrapper(*args, **kwargs):
             add_outlier(endpoint, t, stack_info)
 
         return result
+
     wrapper.original = func
     return wrapper
 
@@ -78,10 +87,12 @@ def track_last_accessed(func, endpoint):
     :param func: the function to be measured
     :param endpoint: the name of the endpoint
     """
+
     @wraps(func)
     def wrapper(*args, **kwargs):
         update_last_accessed(endpoint=endpoint, value=datetime.datetime.now())
         return func(*args, **kwargs)
+
     return wrapper
 
 
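With LOG_DIR configured, each monitored request now appends one CSV row to 'endpoint_hits.log' (the header row is written once by config.py above), which is exactly the file collect_performance.py parses. A hypothetical excerpt of its contents:

    "time","endpoint"
    "2017-06-01 12:00:00.500000","login"
    "2017-06-01 12:00:00.700000","index"
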
dashboard/outlier.py (+2 -4)
@@ -5,7 +5,6 @@
 Moreover, it logs cpu- and memory-info.
 """
 
-
 import time
 import traceback
 from threading import Thread, enumerate
@@ -14,15 +13,14 @@
 
 
 class StackInfo(object):
-
     def __init__(self, average):
         self.average = average
         self.stacktrace = ''
         self.cpu_percent = ''
         self.memory = ''
 
         try:
-            thread = Thread(target=log_stack_trace, args=(self, ))
+            thread = Thread(target=log_stack_trace, args=(self,))
             thread.start()
         except Exception:
             print('Can\'t log traceback information')
@@ -31,7 +29,7 @@ def __init__(self, average):
 
 def log_stack_trace(stack_info):
     # average is in ms, sleep requires seconds
-    time.sleep(stack_info.average/1000.0)
+    time.sleep(stack_info.average / 1000.0)
 
     # iterate through every active thread and get the stack-trace
     stack_list = []

dashboard/routings/export_data.py (+5)
@@ -2,6 +2,7 @@
 from dashboard.security import admin_secure
 from dashboard.database.function_calls import get_data
 from dashboard.database.tests import add_or_update_test, add_test_result, get_suite_nr
+from dashboard.database.tests_grouped import reset_tests_grouped, add_tests_grouped
 from dashboard import blueprint, config
 
 import datetime
@@ -42,4 +43,8 @@ def submit_test_results():
         time = datetime.datetime.strptime(result['time'], '%Y-%m-%d %H:%M:%S.%f')
         add_or_update_test(result['name'], time, result['successful'])
         add_test_result(result['name'], result['exec_time'], time, config.version, suite, result['iter'])
+
+    reset_tests_grouped()
+    add_tests_grouped(request.get_json()['grouped_tests'])
+
     return '', 204
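
For clarity, the JSON body that collect_performance.py POSTs to this route now carries both keys, and the route reads 'grouped_tests' via request.get_json(). A hypothetical example of the expected shape:

    {
      "test_runs": [
        {"name": "test_login (test_auth.AuthTest)", "exec_time": 12.3,
         "time": "2017-06-01 12:00:01.000000", "successful": true, "iter": 1}
      ],
      "grouped_tests": [
        {"endpoint": "login", "test_name": "test_login (test_auth.AuthTest)"}
      ]
    }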

dashboard/routings/measurements.py (+1 -1; whitespace-only change)
@@ -39,7 +39,7 @@ def page_number_of_requests_per_endpoint():
     colors = {}
     for result in get_times():
         colors[result.endpoint] = get_color(result.endpoint)
-    return render_template('dashboard/measurement.html', link=config.link, curr=2, session=session, index=2,
+    return render_template('dashboard/measurement.html', link=config.link, curr=2, session=session, index=2,
                            graph=get_stacked_bar())
 
dashboard/routings/result.py (-1)
@@ -18,7 +18,6 @@
 from dashboard.database.outlier import get_outliers
 from dashboard.colors import get_color
 
-
 # Constants
 BUBBLE_SIZE_RATIO = 1250
 
dashboard/routings/setup.py (+12 -1)
@@ -5,6 +5,7 @@
 from dashboard.database.monitor_rules import reset_monitor_endpoints
 from dashboard.database.tests import get_tests, get_results, get_suites, get_test_measurements
 from dashboard.database.tests import get_res_current, get_measurements
+from dashboard.database.tests_grouped import get_tests_grouped
 from dashboard.forms import MonitorDashboard
 from dashboard.measurement import track_performance
 from dashboard.security import secure, admin_secure
@@ -83,8 +84,18 @@ def test_result(test):
 @blueprint.route('/testmonitor')
 @secure
 def testmonitor():
+    tests = get_tests_grouped()
+    grouped = {}
+    cols = {}
+    for t in tests:
+        if t.endpoint not in grouped:
+            grouped[t.endpoint] = []
+            cols[t.endpoint] = get_color(t.endpoint)
+        if t.test_name not in grouped[t.endpoint]:
+            grouped[t.endpoint].append(t.test_name)
+
     return render_template('dashboard/testmonitor.html', link=config.link, session=session, curr=3,
-                           tests=get_tests(), results=get_results(),
+                           tests=get_tests(), results=get_results(), groups=grouped, colors=cols,
                            res_current_version=get_res_current(config.version), boxplot=get_boxplot(None))
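The new loop reshapes the flat (endpoint, test_name) rows into the two dictionaries handed to the template. For three stored rows the result would look roughly like this; the rgb strings are illustrative, since the real values come from get_color(endpoint):

    groups = {'login': ['test_login', 'test_bad_password'],
              'index': ['test_home_page']}
    cols = {'login': 'rgb(86, 194, 136)',
            'index': 'rgb(201, 121, 81)'}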