Skip to content

Commit 2b56bab

Browse files
stinos authored and dpgeorge committed
tests/run-tests.py: Add an option for running only the failed tests.
Implement the typical 're-run the failed tests' most test runners have, for convenience. Accessible via the new --run-failures argument, and implemented using a json file containing a list of the failed tests. Signed-off-by: stijn <[email protected]>
1 parent 0c81ffd commit 2b56bab

File tree

1 file changed

+43
-3
lines changed

1 file changed

+43
-3
lines changed

tests/run-tests.py

Lines changed: 43 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
import platform
77
import argparse
88
import inspect
9+
import json
910
import re
1011
from glob import glob
1112
import multiprocessing
@@ -47,6 +48,8 @@ def base_path(*p):
4748
# (not site packages which may clash with u-module names), and improve start up time.
4849
CPYTHON3_CMD = [CPYTHON3, "-BS"]
4950

51+
# File with the test results.
52+
RESULTS_FILE = "_results.json"
5053

5154
# For diff'ing test output
5255
DIFF = os.getenv("MICROPY_DIFF", "diff -u")
@@ -770,7 +773,7 @@ def run_one_test(test_file):
770773
with open(filename_mupy, "wb") as f:
771774
f.write(output_mupy)
772775
print("FAIL ", test_file)
773-
failed_tests.append(test_name)
776+
failed_tests.append((test_name, test_file))
774777

775778
test_count.increment()
776779

@@ -784,6 +787,7 @@ def run_one_test(test_file):
784787
for test in tests:
785788
run_one_test(test)
786789

790+
# Leave RESULTS_FILE untouched here for future runs.
787791
if args.list_tests:
788792
return True
789793

@@ -798,8 +802,26 @@ def run_one_test(test_file):
798802
if len(skipped_tests) > 0:
799803
print("{} tests skipped: {}".format(len(skipped_tests), " ".join(skipped_tests)))
800804
failed_tests = sorted(failed_tests.value)
805+
806+
# Serialize regex added by append_filter.
807+
def to_json(obj):
808+
if isinstance(obj, re.Pattern):
809+
return obj.pattern
810+
return obj
811+
812+
with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
813+
json.dump(
814+
{"args": vars(args), "failed_tests": [test[1] for test in failed_tests]},
815+
f,
816+
default=to_json,
817+
)
818+
801819
if len(failed_tests) > 0:
802-
print("{} tests failed: {}".format(len(failed_tests), " ".join(failed_tests)))
820+
print(
821+
"{} tests failed: {}".format(
822+
len(failed_tests), " ".join(test[0] for test in failed_tests)
823+
)
824+
)
803825
return False
804826

805827
# all tests succeeded
@@ -915,6 +937,11 @@ def main():
915937
action="store_true",
916938
help="delete the .exp and .out files from failed tests and exit",
917939
)
940+
cmd_parser.add_argument(
941+
"--run-failures",
942+
action="store_true",
943+
help="re-run only the failed tests",
944+
)
918945
args = cmd_parser.parse_args()
919946

920947
if args.print_failures:
@@ -931,6 +958,7 @@ def main():
931958
os.path.join(args.result_dir, "*.out")
932959
):
933960
os.remove(f)
961+
rm_f(os.path.join(args.result_dir, RESULTS_FILE))
934962

935963
sys.exit(0)
936964

@@ -979,7 +1007,19 @@ def main():
9791007
else:
9801008
raise ValueError("target must be one of %s" % ", ".join(LOCAL_TARGETS + EXTERNAL_TARGETS))
9811009

982-
if len(args.files) == 0:
1010+
if args.run_failures and (any(args.files) or args.test_dirs is not None):
1011+
raise ValueError(
1012+
"--run-failures cannot be used together with files or --test-dirs arguments"
1013+
)
1014+
1015+
if args.run_failures:
1016+
results_file = os.path.join(args.result_dir, RESULTS_FILE)
1017+
if os.path.exists(results_file):
1018+
with open(results_file, "r") as f:
1019+
tests = json.load(f)["failed_tests"]
1020+
else:
1021+
tests = []
1022+
elif len(args.files) == 0:
9831023
if args.test_dirs is None:
9841024
test_dirs = (
9851025
"basics",

0 commit comments

Comments
 (0)