@@ -4,7 +4,9 @@
 __all__ = ["nested_sampling"]
 
 import sys
+import multiprocessing as mp
 import numpy as np
+
 from . import stats as ms
 
 if sys.version_info.major == 2:
@@ -95,45 +97,35 @@ def nested_sampling(data, uncert, func, params, indparams, pmin, pmax, pstep,
     """
     try:
         import dynesty
-        if ncpu > 1:
-            from pathos.multiprocessing import ProcessingPool
     except ImportError as error:
         log.error("ModuleNotFoundError: {}".format(error))
 
     nfree  = int(np.sum(pstep > 0))
     ifree  = np.where(pstep > 0)[0]
     ishare = np.where(pstep < 0)[0]
-    # Can't use multiprocessing.Pool since map can't pickle defs:
-    pool = ProcessingPool(nodes=ncpu) if ncpu > 1 else None
-    queue_size = ncpu if ncpu > 1 else None
-
-    # Setup prior transform:
-    priors = []
-    for p0, plo, pup, min, max, step in zip(prior, priorlow, priorup,
-                                            pmin, pmax, pstep):
-        if step <= 0:
-            continue
-        if plo == 0.0 and pup == 0.0:
-            priors.append(ms.ppf_uniform(min, max))
-        else:
-            priors.append(ms.ppf_gaussian(p0, plo, pup))
-
-    def prior_transform(u):
-        return [p(v) for p, v in zip(priors, u)]
-
-    def loglike(pars):
-        params[ifree] = pars
-        for s in ishare:
-            params[s] = params[-int(pstep[s])-1]
-        return -0.5*np.sum((data-func(params, *indparams))**2/uncert**2)
+
+    # Multiprocessing setup:
+    if ncpu > 1:
+        pool = mp.Pool(ncpu)
+        queue_size = ncpu
+    else:
+        pool = None
+        queue_size = None
 
     # Intercept kwargs that go into DynamicNestedSampler():
-    skip_logp = False
     if 'loglikelihood' in kwargs:
         loglike = kwargs.pop('loglikelihood')
+    else:
+        loglike = ms.Loglike(data, uncert, func, params, indparams, pstep)
+
     if 'prior_transform' in kwargs:
         prior_transform = kwargs.pop('prior_transform')
         skip_logp = True
+    else:
+        prior_transform = ms.Prior_transform(prior, priorlow, priorup,
+                                             pmin, pmax, pstep)
+        skip_logp = False
+
     if 'ndim' in kwargs:
         nfree = kwargs.pop('ndim')
     if 'pool' in kwargs:
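
The swap from pathos to the standard multiprocessing pool works because ms.Loglike and ms.Prior_transform are callables defined at module level (in mc3's stats module), so their instances can be pickled and shipped to worker processes; the nested defs they replace could not be pickled, which is what the removed comment alludes to. A minimal sketch of that pattern, using an illustrative LinearLoglike class that is not part of mc3:

import multiprocessing as mp
import numpy as np

class LinearLoglike(object):
    """Picklable callable: instances of a module-level class can be
    sent to pool workers, unlike a function defined inside another
    function, which the standard pickle module rejects."""
    def __init__(self, data, uncert):
        self.data = data
        self.uncert = uncert

    def __call__(self, pars):
        # Toy linear model, only to make the sketch self-contained:
        model = pars[0] + pars[1]*np.arange(len(self.data))
        return -0.5*np.sum(((self.data - model)/self.uncert)**2)

if __name__ == '__main__':
    loglike = LinearLoglike(np.arange(10.0), np.ones(10))
    pool = mp.Pool(2)
    # Each likelihood evaluation can now run in a worker process:
    print(pool.map(loglike, [np.array([0.0, 1.0]), np.array([0.5, 1.0])]))
    pool.close()
    pool.join()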
@@ -148,7 +140,7 @@ def loglike(pars):
 
     weights = np.exp(sampler.results.logwt - sampler.results.logz[-1])
     isample = resample_equal(weights)
-    posterior = sampler.results.samples[isample]  #[::thinning]
+    posterior = sampler.results.samples[isample]
     chisq = -2.0*sampler.results.logl[isample]
 
     # Contribution to chi-square from non-uniform priors:
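
Here resample_equal converts the nested-sampling importance weights into equally weighted posterior draws. A sketch of one standard implementation (systematic resampling), assuming, as the indexing above suggests, that it returns indices into the sample array; mc3's actual helper may differ:

import numpy as np

def resample_equal(weights):
    """Systematic resampling: return len(weights) indices, each drawn
    with multiplicity proportional to its normalized weight."""
    nsamples = len(weights)
    # One random offset, then evenly spaced positions across [0, 1):
    positions = (np.random.random() + np.arange(nsamples)) / nsamples
    cumweights = np.cumsum(weights)
    cumweights[-1] = 1.0  # guard against floating-point round-off
    return np.searchsorted(cumweights, positions, side='right')

weights = np.array([0.1, 0.2, 0.3, 0.4])
print(resample_equal(weights))  # e.g. [1 2 3 3]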
@@ -173,25 +165,29 @@ def loglike(pars):
     log.msg("\nNested Sampling Summary:"
             "\n------------------------")
 
-    # Get some stats:
+    posterior = posterior[::thinning]
+    chisq = chisq[::thinning]
+    log_post = log_post[::thinning]
+    # Number of evaluated and kept samples:
     nsample = sampler.results['niter']
-    nZsample = len(posterior)  # Valid samples (after thinning and burning)
+    nzsample = len(posterior)
 
     fmt = len(str(nsample))
     log.msg("Number of evaluated samples: {:{}d}".
             format(nsample, fmt), indent=2)
     log.msg("Thinning factor: {:{}d}".
             format(thinning, fmt), indent=2)
     log.msg("NS sample size (thinned): {:{}d}".
-            format(nZsample, fmt), indent=2)
+            format(nzsample, fmt), indent=2)
     log.msg("Sampling efficiency: {:.2f}%\n".
             format(sampler.results['eff']), indent=2)
 
     # Build the output dict:
     output = {
         # The posterior:
         'posterior':posterior,
-        'zchain':np.zeros(nsample, int),
+        'zchain':np.zeros(nzsample, int),
+        'zmask':np.arange(nzsample),
         'chisq':chisq,
         'log_post':log_post,
         'burnin':0,