Auto-round scores down to align scores & medals #74
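The substance of the change: dbt-score now rounds displayed scores down rather than to the nearest decimal, so the printed number never overstates the awarded medal. The new tests below pair a score of 9.99 with a 🥈 medal and expect it to be rendered as 9.9; plain one-decimal formatting would have shown 10.0, visually contradicting the medal. A minimal sketch of that round-down display logic, assuming one decimal of display precision (`format_score` is an illustrative helper, not dbt-score's actual API):

```python
import math


def format_score(value: float) -> str:
    # Truncate (round down) to one decimal so the printed value never
    # overstates the medal: 9.99 -> "9.9".
    # A real implementation might use decimal or string slicing to avoid
    # float edge cases.
    return f"{math.floor(value * 10) / 10:.1f}"


assert format_score(9.99) == "9.9"
assert format_score(10.0) == "10.0"
assert f"{9.99:.1f}" == "10.0"  # the nearest-rounding behaviour this avoids
```

Whether the truncation lives in Score or in the formatter is not visible in this diff; the tests only pin the rendered output.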
@@ -1,6 +1,5 @@
 """Unit tests for the human readable formatter."""
 
 from dbt_score.evaluation import ModelResultsType
 from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
 from dbt_score.rule import RuleViolation

@@ -29,10 +28,10 @@ def test_human_readable_formatter_model(
     stdout = capsys.readouterr().out
     assert (
         stdout
-        == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0)
-\x1B[1;32mOK \x1B[0m tests.conftest.rule_severity_low
-\x1B[1;31mERR \x1B[0m tests.conftest.rule_severity_medium: Oh noes
-\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+        == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0)
+\x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low
+\x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes
+\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
 
 """
     )

Review comment on the \x1B → \x1b change: Interesting, I know hex is not case sensitive, but curious how this changed 🤔

Reply from the author: Appears it's my local ruff. Even though ruff is configured for this project, it's on 0.2.2 and locally I have 0.6.1 running. Will revert this change.
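A quick check of the reviewer's point that the escape is unchanged at runtime; both spellings denote ESC (0x1B), so the expected strings still compare equal:

```python
# Hex escapes are case-insensitive in value; only the source spelling differs.
assert "\x1B" == "\x1b" == chr(27)
```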
@@ -45,7 +44,50 @@ def test_human_readable_formatter_project(capsys, default_config, manifest_loader
     )
     formatter.project_evaluated(Score(10.0, "🥇"))
     stdout = capsys.readouterr().out
-    assert stdout == "Project score: \x1B[1m10.0\x1B[0m 🥇\n"
+    assert stdout == "Project score: \x1b[1m10.0\x1b[0m 🥇\n"
 
 
+def test_human_readable_formatter_near_perfect_model_score(
+    capsys,
+    default_config,
+    manifest_loader,
+    model1,
+    rule_severity_low,
+    rule_severity_medium,
+    rule_severity_critical,
+):
+    """Ensure the formatter has the correct output after model evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    results: ModelResultsType = {
+        rule_severity_low: None,
+        rule_severity_medium: Exception("Oh noes"),
+        rule_severity_critical: RuleViolation("Error"),
+    }
+    formatter.model_evaluated(model1, results, Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert (
+        stdout
+        == """🥈 \x1b[1mmodel1\x1b[0m (score: 9.9)
+\x1b[1;32mOK \x1b[0m tests.conftest.rule_severity_low
+\x1b[1;31mERR \x1b[0m tests.conftest.rule_severity_medium: Oh noes
+\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
+
+"""
+    )
+
+
+def test_human_readable_formatter_near_perfect_project_score(
+    capsys, default_config, manifest_loader
+):
+    """Ensure the formatter has the correct output after project evaluation."""
+    formatter = HumanReadableFormatter(
+        manifest_loader=manifest_loader, config=default_config
+    )
+    formatter.project_evaluated(Score(9.99, "🥈"))
+    stdout = capsys.readouterr().out
+    assert stdout == "Project score: \x1b[1m9.9\x1b[0m 🥈\n"
+
+
 def test_human_readable_formatter_low_model_score(
@@ -68,10 +110,10 @@ def test_human_readable_formatter_low_model_score(
     print()
     assert (
         stdout
-        == """🚧 \x1B[1mmodel1\x1B[0m (score: 0.0)
-\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+        == """🚧 \x1b[1mmodel1\x1b[0m (score: 0.0)
+\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
 
-Project score: \x1B[1m0.0\x1B[0m 🚧
+Project score: \x1b[1m0.0\x1b[0m 🚧
 
 Error: model score too low, fail_any_model_under = 5.0
 Model model1 scored 0.0
@@ -99,10 +141,10 @@ def test_human_readable_formatter_low_project_score(
     print()
     assert (
         stdout
-        == """🥇 \x1B[1mmodel1\x1B[0m (score: 10.0)
-\x1B[1;33mWARN\x1B[0m (critical) tests.conftest.rule_severity_critical: Error
+        == """🥇 \x1b[1mmodel1\x1b[0m (score: 10.0)
+\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
 
-Project score: \x1B[1m0.0\x1B[0m 🚧
+Project score: \x1b[1m0.0\x1b[0m 🚧
 
 Error: project score too low, fail_project_under = 5.0
 """
Comment from the author on the updated test: We were redefining `score` here. So whilst here, I've updated this.
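The "redefining `score`" remark refers to rebinding a single local name to two different Score values inside the test; the original lines are not shown in this diff, so the snippet below is only a hypothetical illustration of the pattern being avoided (and it assumes Score is importable from dbt_score.scoring, which the visible hunks don't confirm):

```python
from dbt_score.scoring import Score  # import path assumed, not shown in the diff

# Hypothetical illustration, not the original test code: rebinding `score`
# means later assertions can silently run against the wrong value.
score = Score(10.0, "🥇")  # model score
score = Score(0.0, "🚧")   # project score, shadowing the model score

# Distinct names keep the two values apart:
model_score = Score(10.0, "🥇")
project_score = Score(0.0, "🚧")
```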