@@ -605,7 +605,7 @@ def run_script_on_remote_target(self, args, test_file, is_special):
 def run_tests(pyb, tests, args, result_dir, num_threads=1):
     test_count = ThreadSafeCounter()
     testcase_count = ThreadSafeCounter()
-    passed_count = ThreadSafeCounter()
+    passed_tests = ThreadSafeCounter([])
     failed_tests = ThreadSafeCounter([])
     skipped_tests = ThreadSafeCounter([])
 
@@ -896,7 +896,7 @@ def run_one_test(test_file):
 
         if skip_it:
             print("skip ", test_file)
-            skipped_tests.append(test_name)
+            skipped_tests.append((test_name, test_file))
             return
 
         # Run the test on the MicroPython target.
@@ -911,7 +911,7 @@ def run_one_test(test_file):
                 # start-up code (eg boot.py) when preparing to run the next test.
                 pyb.read_until(1, b"raw REPL; CTRL-B to exit\r\n")
             print("skip ", test_file)
-            skipped_tests.append(test_name)
+            skipped_tests.append((test_name, test_file))
             return
 
         # Look at the output of the test to see if unittest was used.
@@ -994,7 +994,7 @@ def run_one_test(test_file):
         # Print test summary, update counters, and save .exp/.out files if needed.
         if test_passed:
             print("pass ", test_file, extra_info)
-            passed_count.increment()
+            passed_tests.append((test_name, test_file))
             rm_f(filename_expected)
             rm_f(filename_mupy)
         else:
@@ -1035,17 +1035,30 @@ def run_one_test(test_file):
             print(line)
         sys.exit(1)
 
+    passed_tests = sorted(passed_tests.value)
+    skipped_tests = sorted(skipped_tests.value)
+    failed_tests = sorted(failed_tests.value)
+
     print(
         "{} tests performed ({} individual testcases)".format(
             test_count.value, testcase_count.value
         )
     )
-    print("{} tests passed".format(passed_count.value))
+    print("{} tests passed".format(len(passed_tests)))
 
-    skipped_tests = sorted(skipped_tests.value)
     if len(skipped_tests) > 0:
-        print("{} tests skipped: {}".format(len(skipped_tests), " ".join(skipped_tests)))
-    failed_tests = sorted(failed_tests.value)
+        print(
+            "{} tests skipped: {}".format(
+                len(skipped_tests), " ".join(test[0] for test in skipped_tests)
+            )
+        )
+
+    if len(failed_tests) > 0:
+        print(
+            "{} tests failed: {}".format(
+                len(failed_tests), " ".join(test[0] for test in failed_tests)
+            )
+        )
 
     # Serialize regex added by append_filter.
     def to_json(obj):
@@ -1055,21 +1068,18 @@ def to_json(obj):
 
     with open(os.path.join(result_dir, RESULTS_FILE), "w") as f:
         json.dump(
-            {"args": vars(args), "failed_tests": [test[1] for test in failed_tests]},
+            {
+                "args": vars(args),
+                "passed_tests": [test[1] for test in passed_tests],
+                "skipped_tests": [test[1] for test in skipped_tests],
+                "failed_tests": [test[1] for test in failed_tests],
+            },
             f,
             default=to_json,
         )
 
-    if len(failed_tests) > 0:
-        print(
-            "{} tests failed: {}".format(
-                len(failed_tests), " ".join(test[0] for test in failed_tests)
-            )
-        )
-        return False
-
-    # all tests succeeded
-    return True
+    # Return True only if all tests succeeded.
+    return len(failed_tests) == 0
 
 
 class append_filter(argparse.Action):
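The change relies on `ThreadSafeCounter` accepting either the default integer start value (used with `increment()`) or a list start value (used with `append()`), with the accumulated result exposed as `.value`. A minimal sketch of such a helper, shown here only to illustrate the interface this diff assumes (the real class in run-tests.py may differ in detail), could look like:

```python
import threading


class ThreadSafeCounter:
    # With the default start of 0 this acts as an integer counter; with a
    # list start value it acts as a thread-safe accumulator of items.
    def __init__(self, start=0):
        self._value = start
        self._lock = threading.Lock()

    def add(self, to_add):
        # Serialize updates coming from the worker threads in the thread pool.
        with self._lock:
            self._value += to_add

    def increment(self):
        self.add(1)

    def append(self, arg):
        # Wrapping the item in a list lets += extend the underlying list.
        self.add([arg])

    @property
    def value(self):
        return self._value
```

With this interface, `passed_tests = ThreadSafeCounter([])` collects `(test_name, test_file)` tuples from the worker threads, `sorted(passed_tests.value)` gives a stable order for the printed summary, and the `test[1]` file paths for passed, skipped, and failed tests all end up in the results JSON alongside `failed_tests`.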