@@ -41,37 +41,37 @@ def run_iterations(
 ):
     for iter in range(iters):
         print(f"running {benchmark.name()}, iteration {iter}... ", flush=True)
-        bench_results = benchmark.run(env_vars)
-        if bench_results is None:
-            if options.exit_on_failure:
-                raise RuntimeError(f"Benchmark {benchmark.name()} produced no results!")
-            else:
-                failures[benchmark.name()] = "benchmark produced no results!"
-                break
-
-        for bench_result in bench_results:
-            if not bench_result.passed:
+        try:
+            bench_results = benchmark.run(env_vars)
+            if bench_results is None:
                 if options.exit_on_failure:
-                    raise RuntimeError(
-                        f"Benchmark {benchmark.name()} failed: {bench_result.label} verification failed."
-                    )
+                    raise RuntimeError(f"Benchmark produced no results!")
                 else:
-                    failures[bench_result.label] = "verification failed"
-                    print(f"complete ({bench_result.label}: verification failed).")
-                    continue
-
-            print(
-                f"{benchmark.name()} complete ({bench_result.label}: {bench_result.value:.3f} {bench_result.unit})."
-            )
+                    failures[benchmark.name()] = "benchmark produced no results!"
+                    break

-            bench_result.name = bench_result.label
-            bench_result.lower_is_better = benchmark.lower_is_better()
-            bench_result.suite = benchmark.get_suite_name()
+            for bench_result in bench_results:
+                print(
+                    f"{benchmark.name()} complete ({bench_result.label}: {bench_result.value:.3f} {bench_result.unit})."
+                )
+                bench_result.name = bench_result.label
+                bench_result.lower_is_better = benchmark.lower_is_better()
+                bench_result.suite = benchmark.get_suite_name()

-            if bench_result.label not in results:
-                results[bench_result.label] = []
+                if bench_result.label not in results:
+                    results[bench_result.label] = []

-            results[bench_result.label].append(bench_result)
+                results[bench_result.label].append(bench_result)
+        except Exception as e:
+            failure_label = f"{benchmark.name()} iteration {iter}"
+            if options.exit_on_failure:
+                raise RuntimeError(
+                    f"Benchmark failed: {failure_label} verification failed: {str(e)}"
+                )
+            else:
+                failures[failure_label] = f"verification failed: {str(e)}"
+                print(f"complete ({failure_label}: verification failed: {str(e)}).")
+                continue


 # https://www.statology.org/modified-z-score/
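The hunk replaces the old per-result passed check with a per-iteration try/except: any exception raised while running or post-processing a benchmark iteration is either re-raised (when exit_on_failure is set) or recorded in failures under a "<name> iteration <n>" key, and the loop then moves on to the next iteration. A minimal, self-contained sketch of that flow, using a hypothetical FlakyBenchmark stand-in rather than the repository's real Benchmark class:

# Hypothetical sketch of the new per-iteration error handling; FlakyBenchmark
# is a stand-in, not a class from this repository.
failures = {}

class FlakyBenchmark:
    def name(self):
        return "flaky_bench"

    def run(self, env_vars):
        # Simulate a benchmark that crashes instead of returning results.
        raise RuntimeError("device lost")

benchmark = FlakyBenchmark()
exit_on_failure = False

for iter in range(3):
    try:
        bench_results = benchmark.run({})
        # ... result collection as in the hunk above ...
    except Exception as e:
        failure_label = f"{benchmark.name()} iteration {iter}"
        if exit_on_failure:
            raise
        failures[failure_label] = f"verification failed: {str(e)}"
        continue

print(failures)
# {'flaky_bench iteration 0': 'verification failed: device lost', ...}

Note that the break on "no results" now sits inside the try block but still targets the iteration loop, so that early-exit behavior is unchanged.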
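The unchanged trailing comment links a modified z-score write-up, which presumably backs the outlier statistics defined just after this hunk. For reference, a hedged sketch of the textbook formula (0.6745 * (x - median) / MAD), not necessarily the exact helper this file defines:

# Reference sketch of the modified z-score cited above; textbook formula only,
# not code taken from this repository.
import statistics

def modified_z_scores(values):
    med = statistics.median(values)
    mad = statistics.median(abs(v - med) for v in values)  # median absolute deviation
    if mad == 0:
        return [0.0 for _ in values]
    return [0.6745 * (v - med) / mad for v in values]

# Example: the last sample stands out (|score| > 3.5 by the usual cutoff).
print(modified_z_scores([10.1, 10.3, 9.9, 10.0, 25.0]))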