Remove nan_to_num and nan_to_high from find_MAP.
These functions appear to have been added to work around poor handling of NaN in
`scipy.optimize.minimize` in pre-1.4 versions of `scipy`. The most commonly used
SciPy optimizers, including 'L-BFGS-B', were updated to handle non-finite values
years ago, and these limiters now appear to be actively harmful: they introduce
extra discontinuities into the objective function and Jacobian passed to `minimize`.
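
Illustration (not part of the commit): a minimal sketch of the behaviour described above, assuming a made-up one-dimensional objective `neg_logp` that is NaN for x <= 0. It contrasts letting the non-finite value reach `scipy.optimize.minimize` directly with clamping it to 1.0e100 the way the removed `nan_to_high` did.

import numpy as np
from scipy.optimize import minimize

# Toy objective, undefined (NaN) for x <= 0, standing in for a negative
# log-posterior that is only finite on part of the parameter space.
# Its minimum is at x = 1.
def neg_logp(x):
    with np.errstate(invalid="ignore", divide="ignore"):
        return float(x[0] - np.log(x[0]))

# Raw NaN reaching the optimizer: recent L-BFGS-B releases back off the
# line-search step when they encounter a non-finite value.
res_raw = minimize(neg_logp, x0=[5.0], method="L-BFGS-B")

# Old find_MAP-style clamp: replaces non-finite values with a huge constant,
# which inserts an artificial discontinuity into the objective.
def clamped(x):
    val = neg_logp(x)
    return val if np.isfinite(val) else 1.0e100

res_clamped = minimize(clamped, x0=[5.0], method="L-BFGS-B")
print(res_raw.x, res_clamped.x)  # both should end up near x = 1 in this toy case

In this toy case both calls converge, but the clamped objective turns the undefined region into an enormous non-smooth cliff, which is the kind of artifact the commit message describes.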
quantheory authored and twiecki committed Jun 27, 2022
1 parent be048a4 commit f5d3431
Showing 1 changed file with 3 additions and 7 deletions.
--- a/pymc/tuning/starting.py
+++ b/pymc/tuning/starting.py
@@ -25,7 +25,7 @@
 import numpy as np
 
 from fastprogress.fastprogress import ProgressBar, progress_bar
-from numpy import isfinite, nan_to_num
+from numpy import isfinite
 from scipy.optimize import minimize
 
 import pymc as pm
@@ -181,10 +181,6 @@ def allfinite(x):
     return np.all(isfinite(x))
 
 
-def nan_to_high(x):
-    return np.where(isfinite(x), x, 1.0e100)
-
-
 def allinmodel(vars, model):
     notin = [v for v in vars if v not in model.value_vars]
     if notin:
@@ -214,12 +210,12 @@ def __init__(self, maxeval=5000, progressbar=True, logp_func=None, dlogp_func=No
 
     def __call__(self, x):
         neg_value = np.float64(self.logp_func(pm.floatX(x)))
-        value = -1.0 * nan_to_high(neg_value)
+        value = -1.0 * neg_value
         if self.use_gradient:
             neg_grad = self.dlogp_func(pm.floatX(x))
             if np.all(np.isfinite(neg_grad)):
                 self.previous_x = x
-                grad = nan_to_num(-1.0 * neg_grad)
+                grad = -1.0 * neg_grad
                 grad = grad.astype(np.float64)
             else:
                 self.previous_x = x
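
Usage sketch (hypothetical model, not from the commit): `find_MAP` is called the same way after this change; only the internal handling of non-finite logp and gradient values differs.

import numpy as np
import pymc as pm

# Hypothetical toy model; find_MAP now forwards raw (possibly non-finite)
# logp and gradient values to scipy.optimize.minimize instead of clamping them.
with pm.Model():
    sigma = pm.HalfNormal("sigma", sigma=1.0)
    pm.Normal("y", mu=0.0, sigma=sigma, observed=np.array([0.1, -0.3, 0.2]))
    map_estimate = pm.find_MAP(method="L-BFGS-B")

print(map_estimate)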
