bench_cli.py

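"""Benchmark the pineko command-line interface through click's CliRunner."""
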
import pathlib
import shutil

from click.testing import CliRunner

from pineko.cli._base import command


def benchmark_check_cli(test_files):
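    """Benchmark the `check compatibility` and `check scvar` subcommands on a grid/EKO pair."""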
    grid_path = pathlib.Path(
        test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
    )
    wrong_grid_path = pathlib.Path(
        test_files / "data/grids/208/HERA_CC_318GEV_EM_SIGMARED.pineappl.lz4"
    )
    eko_path = pathlib.Path(test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar")
    runner = CliRunner()
    result = runner.invoke(
        command, ["check", "compatibility", str(grid_path), str(eko_path)]
    )
    assert "Success: grids and eko are compatible" in result.output
    wrong_result = runner.invoke(
        command, ["check", "compatibility", str(wrong_grid_path), str(eko_path)]
    )
    assert (
        "Error: Q2 grid in pineappl grid and eko operator are NOT compatible!"
        in wrong_result.output
    )
    wrong_scvar_res = runner.invoke(
        command, ["check", "scvar", str(grid_path), "wrong_string", "2", "0"]
    )
    assert "Invalid value for 'SCALE'" in wrong_scvar_res.output
    ren_res = runner.invoke(
        command, ["check", "scvar", str(grid_path), "ren", "3", "0"]
    )
    assert (
        "Success: grids contain renormalization scale variations for as"
        in ren_res.output
    )
    fact_res = runner.invoke(
        command, ["check", "scvar", str(grid_path), "fact", "3", "0"]
    )
    assert (
        "Success: grids contain factorization scale variations for as"
        in fact_res.output
    )


def benchmark_opcard_cli(tmp_path, test_files):
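    """Benchmark the `opcard` subcommand, writing an operator card from a grid, a template, and a theory card."""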
    grid_path = pathlib.Path(
        test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
    )
    default_card_path = pathlib.Path(
        test_files / "data/operator_cards/400/_template.yaml"
    )
    thcard_path = pathlib.Path(test_files / "data" / "theory_cards" / "400.yaml")
    target_path = pathlib.Path(tmp_path / "test_ope_card.yaml")
    runner = CliRunner()
    result = runner.invoke(
        command,
        [
            "opcard",
            str(grid_path),
            str(default_card_path),
            str(thcard_path),
            str(target_path),
        ],
    )
    assert "Success" in result.output


def benchmark_compare_cli(lhapdf_path, test_files, test_pdf):
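    """Benchmark the `compare` subcommand on an FK table and its parent grid with a PDF set."""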
    grid_path = pathlib.Path(
        test_files / "data/grids/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
    )
    fk_path = pathlib.Path(
        test_files / "data/fktables/208/LHCB_DY_13TEV_DIMUON.pineappl.lz4"
    )
    runner = CliRunner()
    with lhapdf_path(test_pdf):
        result = runner.invoke(
            command,
            ["compare", str(fk_path), str(grid_path), "2", "0", "NNPDF40_nlo_as_01180"],
        )
    assert "yll left" in result.output


def benchmark_convolve_cli(test_files, tmp_path):
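    """Benchmark the `convolve` subcommand, producing an FK table from a grid and an EKO."""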
    grid_path = pathlib.Path(
        test_files / "data/grids/400/HERA_NC_225GEV_EP_SIGMARED.pineappl.lz4"
    )
    eko_path = pathlib.Path(test_files / "data/ekos/400/HERA_NC_225GEV_EP_SIGMARED.tar")
    fk_path = tmp_path / "testfk.pineappl.lz4"
    runner = CliRunner()
    result = runner.invoke(
        command,
        ["convolve", str(fk_path), str(grid_path), "2", "0", str(eko_path)],
    )
    assert "Optimizing for Nf6Ind" in result.output


def benchmark_scaffold_cli(test_empty_proj):
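    """Benchmark the `scaffold check` and `scaffold new` subcommands on an empty project."""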
    runner = CliRunner()
    conf_file = test_empty_proj / "pineko.toml"
    # an empty project is not correctly configured yet
    res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "check"])
    assert "Error: Project is not correctly configured." in res.output
    # so all the required folders have to be created first
    res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "new"])
    # after which the check succeeds
    res = runner.invoke(command, ["scaffold", "-c", str(conf_file), "check"])
    assert "Success: All the folders are correctly configured" in res.output


def benchmark_gen_sv_cli(test_files, tmp_path):
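    """Benchmark the `ren_sv_grid` subcommand on a grid without renormalization scale variations."""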
    runner = CliRunner()
    max_as = "2"
    nf = "5"
    name_grid = "ATLAS_TTB_8TEV_LJ_TRAP_norensv_fixed.pineappl.lz4"
    grid_path = test_files / "data" / "grids" / "400" / name_grid
    new_grid_path = tmp_path / name_grid
    target_path = tmp_path
    shutil.copy(grid_path, new_grid_path)
    res = runner.invoke(
        command,
        ["ren_sv_grid", str(new_grid_path), str(target_path), max_as, nf, "False"],
    )
    assert "ReturnState.SUCCESS" in res.output


def benchmark_kfactor_cli(test_files, tmp_path):
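    """Benchmark the `kfactor` subcommand, updating a dataset's grids with kfactors from a folder."""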
    runner = CliRunner()
    conf_file = test_files / "pineko.toml"
    theory_id = 400
    dataset = "ATLAS_TTB_FAKE"
    kfolder = test_files / "data" / "kfactors"
    order_to_update = "3"
    target_path = tmp_path
    res = runner.invoke(
        command,
        [
            "kfactor",
            "-c",
            str(conf_file),
            str(theory_id),
            str(dataset),
            str(kfolder),
            str(target_path),
            order_to_update,
        ],
    )
    assert "The number of bins match the length of the kfactor" in res.output