@@ -8,7 +8,15 @@
 from scipy.stats import norm
 
 import petab.v1
-from petab.v1 import get_simulation_conditions
+from petab.v1 import (
+    ESTIMATE,
+    MEASUREMENT,
+    OBJECTIVE_PRIOR_TYPE,
+    OBSERVABLE_ID,
+    SIMULATION,
+    get_simulation_conditions,
+    get_simulation_df,
+)
 from petab.v1.priors import priors_to_measurements
 
 
@@ -17,20 +25,31 @@
 )
 def test_priors_to_measurements(problem_id):
     """Test the conversion of priors to measurements."""
+    # setup
     petab_problem_priors: petab.v1.Problem = (
         benchmark_models_petab.get_problem(problem_id)
     )
     petab_problem_priors.visualization_df = None
     assert petab.v1.lint_problem(petab_problem_priors) is False
-
     if problem_id == "Isensee_JCB2018":
         # required to match the stored simulation results below
         petab.v1.flatten_timepoint_specific_output_overrides(
             petab_problem_priors
         )
         assert petab.v1.lint_problem(petab_problem_priors) is False
+
     original_problem = deepcopy(petab_problem_priors)
+    # All priors in this test case are defined on parameter scale, hence
+    # the dummy measurements will take the scaled nominal values.
+    x_scaled_dict = dict(
+        zip(
+            original_problem.x_free_ids,
+            original_problem.x_nominal_free_scaled,
+            strict=True,
+        )
+    )
 
+    # convert priors to measurements
     petab_problem_measurements = priors_to_measurements(petab_problem_priors)
 
     # check that the original problem is not modified
@@ -45,6 +64,7 @@ def test_priors_to_measurements(problem_id):
                 getattr(original_problem, attr)
             )
         ).empty, diff
+
     # check that measurements and observables were added
     assert petab.v1.lint_problem(petab_problem_measurements) is False
     assert (
@@ -59,6 +79,7 @@ def test_priors_to_measurements(problem_id):
         petab_problem_measurements.measurement_df.shape[0]
         > petab_problem_priors.measurement_df.shape[0]
     )
+
     # ensure we didn't introduce any new conditions
     assert len(
         get_simulation_conditions(petab_problem_measurements.measurement_df)
@@ -67,26 +88,40 @@ def test_priors_to_measurements(problem_id):
     # verify that the objective function value is the same
 
     # load/construct the simulation results
-    simulation_df_priors = petab.v1.get_simulation_df(
+    simulation_df_priors = get_simulation_df(
         Path(
             benchmark_models_petab.MODELS_DIR,
             problem_id,
             f"simulatedData_{problem_id}.tsv",
         )
     )
-    simulation_df_measurements = pd.concat(
-        [
-            petab_problem_measurements.measurement_df.rename(
-                columns={petab.v1.MEASUREMENT: petab.v1.SIMULATION}
-            )[
-                petab_problem_measurements.measurement_df[
-                    petab.v1.C.OBSERVABLE_ID
-                ].str.startswith("prior_")
-            ],
-            simulation_df_priors,
+    # for the prior observables, we need to "simulate" the model with the
+    # nominal parameter values
+    simulated_prior_observables = (
+        petab_problem_measurements.measurement_df.rename(
+            columns={MEASUREMENT: SIMULATION}
+        )[
+            petab_problem_measurements.measurement_df[
+                OBSERVABLE_ID
+            ].str.startswith("prior_")
         ]
     )
 
+    def apply_parameter_values(row):
+        # apply the parameter values to the observable formula for the prior
+        if row[OBSERVABLE_ID].startswith("prior_"):
+            row[SIMULATION] = x_scaled_dict[
+                row[OBSERVABLE_ID].removeprefix("prior_")
+            ]
+        return row
+
+    simulated_prior_observables = simulated_prior_observables.apply(
+        apply_parameter_values, axis=1
+    )
+    simulation_df_measurements = pd.concat(
+        [simulation_df_priors, simulated_prior_observables]
+    )
+
     llh_priors = petab.v1.calculate_llh_for_table(
         petab_problem_priors.measurement_df,
         simulation_df_priors,
@@ -102,10 +137,8 @@ def test_priors_to_measurements(problem_id):
 
     # get prior objective function contribution
     parameter_ids = petab_problem_priors.parameter_df.index.values[
-        (petab_problem_priors.parameter_df[petab.v1.ESTIMATE] == 1)
-        & petab_problem_priors.parameter_df[
-            petab.v1.OBJECTIVE_PRIOR_TYPE
-        ].notna()
+        (petab_problem_priors.parameter_df[ESTIMATE] == 1)
+        & petab_problem_priors.parameter_df[OBJECTIVE_PRIOR_TYPE].notna()
     ]
     priors = petab.v1.get_priors_from_df(
         petab_problem_priors.parameter_df,
@@ -117,9 +150,7 @@ def test_priors_to_measurements(problem_id):
         prior_type, prior_pars, par_scale, par_bounds = prior
         if prior_type == petab.v1.PARAMETER_SCALE_NORMAL:
             prior_contrib += norm.logpdf(
-                petab_problem_priors.x_nominal_free_scaled[
-                    petab_problem_priors.x_free_ids.index(parameter_id)
-                ],
+                x_scaled_dict[parameter_id],
                 loc=prior_pars[0],
                 scale=prior_pars[1],
            )
@@ -134,4 +165,4 @@ def test_priors_to_measurements(problem_id):
         llh_priors + prior_contrib, llh_measurements, rtol=1e-3, atol=1e-16
     ), (llh_priors + prior_contrib, llh_measurements)
     # check that the tolerance is not too high
-    assert np.abs(prior_contrib) > 1e-3 * np.abs(llh_priors)
+    assert np.abs(prior_contrib) > 1e-8 * np.abs(llh_priors)
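
Note on the equivalence this test exercises: for a parameterScaleNormal prior, the conversion appears to encode the prior as a prior_<parameterId> observable whose dummy measurement is the prior mean and whose noise parameter is the prior standard deviation, so that the measurement log-likelihood evaluated at the scaled parameter value reproduces the prior log-density. That works because the normal log-density is symmetric in its argument and its mean. A minimal standalone sketch of this identity (mu, sigma, and x are made-up values, not taken from any benchmark problem):

    import numpy as np
    from scipy.stats import norm

    mu, sigma = 0.5, 0.2  # hypothetical prior mean/std on parameter scale
    x = 0.3               # hypothetical scaled parameter value ("simulation")

    # prior contribution, as accumulated in the test's loop ...
    prior_contrib = norm.logpdf(x, loc=mu, scale=sigma)
    # ... equals the likelihood of the dummy "measurement" mu under a
    # normal noise model with std sigma, given the simulated value x
    llh_dummy = norm.logpdf(mu, loc=x, scale=sigma)

    assert np.isclose(prior_contrib, llh_dummy)

This symmetry is why llh_priors + prior_contrib is expected to match llh_measurements above, up to the stated tolerance.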