@@ -43,39 +43,39 @@ def run_iterations(
 ):
     for iter in range(iters):
         log.info(f"running {benchmark.name()}, iteration {iter}... ")
-        bench_results = benchmark.run(env_vars)
-        if bench_results is None:
-            if options.exit_on_failure:
-                raise RuntimeError(f"Benchmark {benchmark.name()} produced no results!")
-            else:
-                failures[benchmark.name()] = "benchmark produced no results!"
-                break
-
-        for bench_result in bench_results:
-            if not bench_result.passed:
+        try:
+            bench_results = benchmark.run(env_vars)
+            if bench_results is None:
                 if options.exit_on_failure:
                     raise RuntimeError(
-                        f"Benchmark {benchmark.name()} failed: {bench_result.label} verification failed."
+                        f"Benchmark {benchmark.name()} produced no results!"
                     )
                 else:
-                    failures[bench_result.label] = "verification failed"
-                    log.warning(
-                        f"complete ({bench_result.label}: verification failed)."
-                    )
-                    continue
-
-            log.info(
-                f"{benchmark.name()} complete ({bench_result.label}: {bench_result.value:.3f} {bench_result.unit})."
-            )
+                    failures[benchmark.name()] = "benchmark produced no results!"
+                    break

-            bench_result.name = bench_result.label
-            bench_result.lower_is_better = benchmark.lower_is_better()
-            bench_result.suite = benchmark.get_suite_name()
+            for bench_result in bench_results:
+                log.info(
+                    f"{benchmark.name()} complete ({bench_result.label}: {bench_result.value:.3f} {bench_result.unit})."
+                )
+                bench_result.name = bench_result.label
+                bench_result.lower_is_better = benchmark.lower_is_better()
+                bench_result.suite = benchmark.get_suite_name()

-            if bench_result.label not in results:
-                results[bench_result.label] = []
+                if bench_result.label not in results:
+                    results[bench_result.label] = []

-            results[bench_result.label].append(bench_result)
+                results[bench_result.label].append(bench_result)
+        except Exception as e:
+            failure_label = f"{benchmark.name()} iteration {iter}"
+            if options.exit_on_failure:
+                raise RuntimeError(
+                    f"Benchmark failed: {failure_label} verification failed: {str(e)}"
+                )
+            else:
+                failures[failure_label] = f"verification failed: {str(e)}"
+                log.error(f"complete ({failure_label}: verification failed: {str(e)}).")
+                continue


 # https://www.statology.org/modified-z-score/
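
Below is a minimal, self-contained sketch (not part of the patch) of the error-handling path the new try/except introduces: when benchmark.run() raises and options.exit_on_failure is false, the failure is recorded under a per-iteration label and the loop moves on to the next iteration. The Options and FlakyBenchmark stubs are hypothetical stand-ins; only the control flow mirrors the change above.

import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger(__name__)


class Options:
    # flag name taken from the diff; set to False so the loop keeps going on failure
    exit_on_failure = False


class FlakyBenchmark:
    # hypothetical stub standing in for a real benchmark object
    def name(self):
        return "flaky"

    def run(self, env_vars):
        raise ValueError("device lost")  # simulate a benchmark that crashes


options = Options()
benchmark = FlakyBenchmark()
failures = {}

for iter in range(3):
    try:
        bench_results = benchmark.run({})
        # result post-processing would go here, as in the patch
    except Exception as e:
        failure_label = f"{benchmark.name()} iteration {iter}"
        if options.exit_on_failure:
            raise RuntimeError(f"Benchmark failed: {failure_label}: {e}")
        failures[failure_label] = f"verification failed: {str(e)}"
        log.error(f"complete ({failure_label}: verification failed: {str(e)}).")
        continue

print(failures)
# {'flaky iteration 0': 'verification failed: device lost', ..., 'flaky iteration 2': 'verification failed: device lost'}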