Skip to content

Commit

Permalink
Merge pull request lenstronomy#362 from sibirrer/main
Browse files Browse the repository at this point in the history
changed the PSO return from chi2_list to log_likelihood list
  • Loading branch information
sibirrer authored Sep 12, 2022
2 parents 826e652 + 80c2df0 commit 2710424
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 17 deletions.
21 changes: 11 additions & 10 deletions lenstronomy/Sampling/Samplers/pso.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ def set_global_best(self, position, velocity, fitness):
def _init_swarm(self):
"""
Initiate the swarm.
:return:
:rtype:
"""
Expand All @@ -125,10 +126,9 @@ def sample(self, max_iter=1000, c1=1.193, c2=1.193, p=0.7, m=1e-3, n=1e-2, early
:param c1: cognitive weight
:param c2: social weight
:param p: stop criterion, percentage of particles to use
:param m: stop criterion, difference between mean fitness and global
best
:param m: stop criterion, difference between mean fitness and global best
:param n: stop criterion, difference between norm of the particle
vector and norm of the global best
vector and norm of the global best
:param early_stop_tolerance: will terminate at the given value (should be specified as a chi^2)
"""

Expand Down Expand Up @@ -194,19 +194,18 @@ def optimize(self, max_iter=1000, verbose=True, c1=1.193, c2=1.193,
:param c1: cognitive weight
:param c2: social weight
:param p: stop criterion, percentage of particles to use
:param m: stop criterion, difference between mean fitness and global
best
:param m: stop criterion, difference between mean fitness and global best
:param n: stop criterion, difference between norm of the particle
vector and norm of the global best
vector and norm of the global best
:param early_stop_tolerance: will terminate at the given value (should be specified as a chi^2)
"""
chi2_list = []
log_likelihood_list = []
vel_list = []
pos_list = []

num_iter = 0
for _ in self.sample(max_iter, c1, c2, p, m, n, early_stop_tolerance):
chi2_list.append(self.global_best.fitness * 2)
log_likelihood_list.append(self.global_best.fitness)
vel_list.append(self.global_best.velocity)
pos_list.append(self.global_best.position)
num_iter += 1
Expand All @@ -215,11 +214,12 @@ def optimize(self, max_iter=1000, verbose=True, c1=1.193, c2=1.193,
if num_iter % 10 == 0:
print(num_iter)

return self.global_best.position, [chi2_list, pos_list, vel_list]
return self.global_best.position, [log_likelihood_list, pos_list, vel_list]

def _get_fitness(self, swarm):
"""
Set fitness (probability) of the particles in swarm.
:param swarm: PSO state
:type swarm: list of Particle() instances of the swarm
:return:
Expand All @@ -239,6 +239,7 @@ def _get_fitness(self, swarm):
def _converged(self, it, p, m, n):
"""
Check for convergence.
:param it:
:type it:
:param p:
Expand Down Expand Up @@ -274,7 +275,7 @@ def _converged_fit(self, it, p, m):
best_sort = np.sort([particle.personal_best.fitness for particle in
self.swarm])[::-1]
mean_fit = np.mean(best_sort[1:int(math.floor(self.particleCount * p))])
#print( "best %f, mean_fit %f, ration %f"%( self.global_best[0],
# print( "best %f, mean_fit %f, ration %f"%( self.global_best[0],
# mean_fit, abs((self.global_best[0]-mean_fit))))
return abs(self.global_best.fitness - mean_fit) < m

Expand Down
16 changes: 9 additions & 7 deletions lenstronomy/Sampling/sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def simplex(self, init_pos, n_iterations, method, print_key='SIMPLEX'):
kwargs_return = self.chain.param.args2kwargs(result['x'])
print(-logL * 2 / (max(self.chain.effective_num_data_points(**kwargs_return), 1)),
'reduced X^2 of best position')
print(logL, 'logL')
print(logL, 'log likelihood')
print(self.chain.effective_num_data_points(**kwargs_return), 'effective number of data points')
print(kwargs_return.get('kwargs_lens', None), 'lens result')
print(kwargs_return.get('kwargs_source', None), 'source result')
Expand Down Expand Up @@ -100,13 +100,13 @@ def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None,

time_start = time.time()

result, [chi2_list, pos_list, vel_list] = pso.optimize(n_iterations)
result, [log_likelihood_list, pos_list, vel_list] = pso.optimize(n_iterations)

if pool.is_master():
kwargs_return = self.chain.param.args2kwargs(result)
print(pso.global_best.fitness * 2 / (max(
self.chain.effective_num_data_points(**kwargs_return), 1)), 'reduced X^2 of best position')
print(pso.global_best.fitness, 'logL')
print(pso.global_best.fitness, 'log likelihood')
print(self.chain.effective_num_data_points(**kwargs_return), 'effective number of data points')
print(kwargs_return.get('kwargs_lens', None), 'lens result')
print(kwargs_return.get('kwargs_source', None), 'source result')
Expand All @@ -116,7 +116,7 @@ def pso(self, n_particles, n_iterations, lower_start=None, upper_start=None,
time_end = time.time()
print(time_end - time_start, 'time used for ', print_key)
print('===================')
return result, [chi2_list, pos_list, vel_list]
return result, [log_likelihood_list, pos_list, vel_list]

def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start,
mpi=False, progress=False, threadCount=1,
Expand Down Expand Up @@ -217,12 +217,14 @@ def mcmc_zeus(self, n_walkers, n_run, n_burn, mean_start, sigma_start,
:type mean_start: numpy array of length the number of parameters
:param sigma_start: spread of the parameter values (uncorrelated in each dimension) of the initialising sample
:type sigma_start: numpy array of length the number of parameters
:param mpi: if True, initializes an MPIPool to allow for MPI execution of the sampler
:type mpi: bool
:param progress:
:type progress: bool
:param initpos: initial walker position to start sampling (optional)
:type initpos: numpy array of size num param x num walkers
:param backup_filename: name of the HDF5 file where sampling state is saved (through zeus callback function)
:type backup_filename: string
:param backend_filename: name of the HDF5 file where sampling state is saved (through zeus callback function)
:type backend_filename: string
:return: samples, ln likelihood value of samples
:rtype: numpy 2d array, numpy 1d array
"""
Expand Down Expand Up @@ -251,7 +253,7 @@ def mcmc_zeus(self, n_walkers, n_run, n_burn, mean_start, sigma_start,
blobs_dtype=blobs_dtype, verbose=verbose, check_walkers=check_walkers,
shuffle_ensemble=shuffle_ensemble, light_mode=light_mode)

sampler.run_mcmc(initpos, n_run_eff, progress=progress, callbacks = backend)
sampler.run_mcmc(initpos, n_run_eff, progress=progress, callbacks=backend)

flat_samples = sampler.get_chain(flat=True, thin=1, discard=n_burn)

Expand Down

0 comments on commit 2710424

Please sign in to comment.