
Commit f5d3431

quantheory authored and twiecki committed
Remove nan_to_num and nan_to_high from find_MAP.
These functions appear to have been added due to poor handling of NaN in `scipy.optimize.minimize` for pre-1.4 versions of `scipy`. The most commonly used SciPy optimizers, including 'L-BFGS-B', were updated to handle this years ago, and it appears that these limiters are now actively harmful, since they produce extra discontinuities in the function and Jacobian passed to `minimize`.
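For illustration only (not part of the commit), here is a minimal sketch of the problem described above: clamping a non-finite negative log-probability to 1.0e100 replaces the "invalid point" signal with an enormous finite cliff, so the objective handed to `scipy.optimize.minimize` becomes discontinuous at the edge of the support. The toy objective `neg_logp` below is hypothetical.

    import numpy as np

    def neg_logp(x):
        # Hypothetical objective with support x > 0; outside it the value is NaN.
        return -np.log(x) + x if x > 0 else np.nan

    def nan_to_high(x):
        # The limiter removed by this commit.
        return np.where(np.isfinite(x), x, 1.0e100)

    for x in (-0.01, 0.0, 0.01, 1.0):
        print(x, neg_logp(x), float(nan_to_high(neg_logp(x))))
    # Raw values:     nan, nan, ~4.615, 1.0    -> the optimizer sees the invalid region directly
    # Clamped values: 1e100, 1e100, ~4.615, 1.0 -> an artificial cliff at the boundary instead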
1 parent be048a4 commit f5d3431

File tree

1 file changed: +3 −7 lines changed

pymc/tuning/starting.py

+3 −7

@@ -25,7 +25,7 @@
 import numpy as np
 
 from fastprogress.fastprogress import ProgressBar, progress_bar
-from numpy import isfinite, nan_to_num
+from numpy import isfinite
 from scipy.optimize import minimize
 
 import pymc as pm
@@ -181,10 +181,6 @@ def allfinite(x):
     return np.all(isfinite(x))
 
 
-def nan_to_high(x):
-    return np.where(isfinite(x), x, 1.0e100)
-
-
 def allinmodel(vars, model):
     notin = [v for v in vars if v not in model.value_vars]
     if notin:
@@ -214,12 +210,12 @@ def __init__(self, maxeval=5000, progressbar=True, logp_func=None, dlogp_func=No
 
     def __call__(self, x):
         neg_value = np.float64(self.logp_func(pm.floatX(x)))
-        value = -1.0 * nan_to_high(neg_value)
+        value = -1.0 * neg_value
         if self.use_gradient:
             neg_grad = self.dlogp_func(pm.floatX(x))
             if np.all(np.isfinite(neg_grad)):
                 self.previous_x = x
-                grad = nan_to_num(-1.0 * neg_grad)
+                grad = -1.0 * neg_grad
                 grad = grad.astype(np.float64)
             else:
                 self.previous_x = x
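A hedged usage sketch (the model below is illustrative, not from the commit): `find_MAP` wraps the model's negative log-probability in the cost-function wrapper edited above and hands it to `scipy.optimize.minimize`, so after this change any non-finite values reach the optimizer directly instead of being replaced by clamped stand-ins.

    import pymc as pm

    with pm.Model():
        mu = pm.Normal("mu", 0.0, 1.0)
        pm.Normal("obs", mu, 1.0, observed=[0.1, -0.3, 0.2])
        # Exercises the __call__ path shown in the diff above.
        map_estimate = pm.find_MAP(method="L-BFGS-B")

    print(map_estimate)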
