
Commit b1f95e5

Replace uses of testval with initval
1 parent aa22bcc commit b1f95e5

13 files changed, +79 -79 lines changed
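
This commit is a mechanical rename: a starting value that was previously passed as testval is now passed as initval, with the argument otherwise used the same way. A minimal sketch of the calling convention implied by the changes below (the model and values are illustrative only, not taken from the changed files):

    import pymc3 as pm

    with pm.Model():
        # before this commit the starting point was supplied as testval:
        # x = pm.Normal("x", mu=0.0, sigma=1.0, testval=0.1)
        # after this commit the same argument is spelled initval:
        x = pm.Normal("x", mu=0.0, sigma=1.0, initval=0.1)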

Diff for: pymc3/distributions/bart.py (+1, -1)

@@ -27,7 +27,7 @@ def __init__(self, X, Y, m=200, alpha=0.25, split_prior=None, *args, **kwargs):
 
         self.X, self.Y, self.missing_data = self.preprocess_XY(X, Y)
 
-        super().__init__(shape=X.shape[0], dtype="float64", testval=0, *args, **kwargs)
+        super().__init__(shape=X.shape[0], dtype="float64", initval=0, *args, **kwargs)
 
         if self.X.ndim != 2:
             raise ValueError("The design matrix X must have two dimensions")

Diff for: pymc3/distributions/bound.py (+4, -4)

@@ -42,7 +42,7 @@ def __init__(self, distribution, lower, upper, default, *args, **kwargs):
         super().__init__(
             shape=self._wrapped.shape,
             dtype=self._wrapped.dtype,
-            testval=self._wrapped.testval,
+            initval=self._wrapped.initval,
             defaults=defaults,
             transform=self._wrapped.transform,
         )
@@ -252,15 +252,15 @@ class Bound:
 
         with pm.Model():
             NegativeNormal = pm.Bound(pm.Normal, upper=0.0)
-            par1 = NegativeNormal('par`', mu=0.0, sigma=1.0, testval=-0.5)
+            par1 = NegativeNormal('par`', mu=0.0, sigma=1.0, initval=-0.5)
             # you can use the Bound object multiple times to
             # create multiple bounded random variables
-            par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, testval=-1.5)
+            par1_1 = NegativeNormal('par1_1', mu=-1.0, sigma=1.0, initval=-1.5)
 
             # you can also define a Bound implicitly, while applying
             # it to a random variable
             par2 = pm.Bound(pm.Normal, lower=-1.0, upper=1.0)(
-                'par2', mu=0.0, sigma=1.0, testval=1.0)
+                'par2', mu=0.0, sigma=1.0, initval=1.0)
     """
 
     def __init__(self, distribution, lower=None, upper=None):

Diff for: pymc3/distributions/distribution.py (+6, -6)

@@ -277,14 +277,14 @@ def __init__(
         self,
         shape,
         dtype,
-        testval=None,
+        initval=None,
         defaults=(),
         parent_dist=None,
         *args,
         **kwargs,
     ):
         super().__init__(
-            shape=shape, dtype=dtype, testval=testval, defaults=defaults, *args, **kwargs
+            shape=shape, dtype=dtype, initval=initval, defaults=defaults, *args, **kwargs
         )
         self.parent_dist = parent_dist
 
@@ -342,7 +342,7 @@ def __init__(
         logp,
         shape=(),
         dtype=None,
-        testval=0,
+        initval=0,
         random=None,
         wrap_random_with_dist_shape=True,
         check_shape_in_random=True,
@@ -363,8 +363,8 @@ def __init__(
             a value here.
         dtype: None, str (Optional)
             The dtype of the distribution.
-        testval: number or array (Optional)
-            The ``testval`` of the RV's tensor that follow the ``DensityDist``
+        initval: number or array (Optional)
+            The ``initval`` of the RV's tensor that follow the ``DensityDist``
             distribution.
         args, kwargs: (Optional)
             These are passed to the parent class' ``__init__``.
@@ -400,7 +400,7 @@ def __init__(
         """
         if dtype is None:
             dtype = aesara.config.floatX
-        super().__init__(shape, dtype, testval, *args, **kwargs)
+        super().__init__(shape, dtype, initval, *args, **kwargs)
         self.logp = logp
         if type(self.logp) == types.MethodType:
             if PLATFORM != "linux":
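
Since the DensityDist signature and docstring above now take initval instead of testval, a hedged usage sketch follows (the log-density and starting value are made up for illustration; only the keyword name comes from this commit):

    import pymc3 as pm

    with pm.Model():
        mu = pm.Normal("mu", 0.0, 1.0)
        # a user-supplied (unnormalized) log-density; initval=0.0 sets the
        # starting value of the resulting variable, as testval did before
        pm.DensityDist("x", lambda value: -0.5 * (value - mu) ** 2, initval=0.0)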

Diff for: pymc3/distributions/mixture.py (+2, -2)

@@ -609,7 +609,7 @@ class NormalMixture(Mixture):
                 10,
                 shape=n_components,
                 transform=pm.transforms.ordered,
-                testval=[1, 2, 3],
+                initval=[1, 2, 3],
             )
             σ = pm.HalfNormal("σ", 10, shape=n_components)
             weights = pm.Dirichlet("w", np.ones(n_components))
@@ -684,7 +684,7 @@ def __init__(self, w, comp_dists, mixture_axis=-1, *args, **kwargs):
         self.mixture_axis = mixture_axis
         kwargs.setdefault("dtype", self.comp_dists.dtype)
 
-        # Compute the mode so we don't always have to pass a testval
+        # Compute the mode so we don't always have to pass a initval
         defaults = kwargs.pop("defaults", [])
         event_shape = self.comp_dists.shape[mixture_axis + 1 :]
         _w = at.shape_padleft(

Diff for: pymc3/distributions/multivariate.py (+9, -9)

@@ -840,7 +840,7 @@ def logp(self, X):
         )
 
 
-def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
+def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, initval=None):
     R"""
     Bartlett decomposition of the Wishart distribution. As the Wishart
     distribution requires the matrix to be symmetric positive semi-definite
@@ -875,7 +875,7 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
         Input matrix S is already Cholesky decomposed as S.T * S
     return_cholesky: bool (default=False)
         Only return the Cholesky decomposed matrix.
-    testval: ndarray
+    initval: ndarray
         p x p positive definite matrix used to initialize
 
     Notes
@@ -894,21 +894,21 @@ def WishartBartlett(name, S, nu, is_cholesky=False, return_cholesky=False, testval=None):
     n_diag = len(diag_idx[0])
     n_tril = len(tril_idx[0])
 
-    if testval is not None:
+    if initval is not None:
         # Inverse transform
-        testval = np.dot(np.dot(np.linalg.inv(L), testval), np.linalg.inv(L.T))
-        testval = linalg.cholesky(testval, lower=True)
-        diag_testval = testval[diag_idx] ** 2
-        tril_testval = testval[tril_idx]
+        initval = np.dot(np.dot(np.linalg.inv(L), initval), np.linalg.inv(L.T))
+        initval = linalg.cholesky(initval, lower=True)
+        diag_testval = initval[diag_idx] ** 2
+        tril_testval = initval[tril_idx]
     else:
         diag_testval = None
         tril_testval = None
 
     c = at.sqrt(
-        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, testval=diag_testval)
+        ChiSquared("%s_c" % name, nu - np.arange(2, 2 + n_diag), shape=n_diag, initval=diag_testval)
     )
     pm._log.info("Added new variable %s_c to model diagonal of Wishart." % name)
-    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, testval=tril_testval)
+    z = Normal("%s_z" % name, 0.0, 1.0, shape=n_tril, initval=tril_testval)
     pm._log.info("Added new variable %s_z to model off-diagonals of Wishart." % name)
     # Construct A matrix
     A = at.zeros(S.shape, dtype=np.float32)
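
WishartBartlett is a user-facing helper, so the rename also changes how callers pass the p x p initialization matrix described in the docstring above. A hedged sketch (scale matrix, degrees of freedom, and starting matrix are illustrative values, not taken from the changed files):

    import numpy as np
    import pymc3 as pm

    S = np.eye(3)      # prior scale matrix (illustrative)
    init = np.eye(3)   # p x p positive definite starting matrix

    with pm.Model():
        # initval replaces the former testval keyword in the signature shown above
        prec = pm.WishartBartlett("prec", S=S, nu=5, initval=init)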

Diff for: pymc3/tests/models.py (+9, -9)

@@ -30,7 +30,7 @@ def simple_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=2, testval=floatX_array([0.1, 0.1]))
+        Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))
 
     return model.initial_point, model, (mu, tau ** -0.5)
 
@@ -39,7 +39,7 @@ def simple_categorical():
     p = floatX_array([0.1, 0.2, 0.3, 0.4])
     v = floatX_array([0.0, 1.0, 2.0, 3.0])
     with Model() as model:
-        Categorical("x", p, size=3, testval=[1, 2, 3])
+        Categorical("x", p, size=3, initval=[1, 2, 3])
 
     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
@@ -50,7 +50,7 @@ def multidimensional_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal("x", mu, tau=tau, size=(3, 2), testval=0.1 * np.ones((3, 2)))
+        Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))
 
     return model.initial_point, model, (mu, tau ** -0.5)
 
@@ -81,7 +81,7 @@ def simple_2model():
     tau = 1.3
     p = 0.4
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Bernoulli("y", p)
     return model.initial_point, model
@@ -91,7 +91,7 @@ def simple_2model_continuous():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Beta("y", alpha=1, beta=1, size=2)
     return model.initial_point, model
@@ -106,7 +106,7 @@ def mv_simple():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
         H = tau
         C = np.linalg.inv(H)
@@ -122,7 +122,7 @@ def mv_simple_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
         H = tau
         C = np.linalg.inv(H)
@@ -138,7 +138,7 @@ def mv_simple_very_coarse():
             "x",
             at.constant(mu),
             tau=at.constant(tau),
-            testval=floatX_array([0.1, 1.0, 0.8]),
+            initval=floatX_array([0.1, 1.0, 0.8]),
         )
         H = tau
         C = np.linalg.inv(H)
@@ -150,7 +150,7 @@ def mv_simple_discrete():
     n = 5
     p = floatX_array([0.15, 0.85])
     with pm.Model() as model:
-        pm.Multinomial("x", n, at.constant(p), testval=np.array([1, 4]))
+        pm.Multinomial("x", n, at.constant(p), initval=np.array([1, 4]))
         mu = n * p
         # covariance matrix
         C = np.zeros((d, d))

Diff for: pymc3/tests/test_distributions_timeseries.py (+5, -5)

@@ -68,13 +68,13 @@ def test_AR_nd():
     beta_tp = np.random.randn(p, n)
     y_tp = np.random.randn(T, n)
     with Model() as t0:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
-        AR("y", beta, sigma=1.0, shape=(T, n), testval=y_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
+        AR("y", beta, sigma=1.0, shape=(T, n), initval=y_tp)
 
     with Model() as t1:
-        beta = Normal("beta", 0.0, 1.0, shape=(p, n), testval=beta_tp)
+        beta = Normal("beta", 0.0, 1.0, shape=(p, n), initval=beta_tp)
         for i in range(n):
-            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, testval=y_tp[:, i])
+            AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])
 
     np.testing.assert_allclose(t0.logp(t0.initial_point), t1.logp(t1.initial_point))
 
@@ -150,7 +150,7 @@ def test_linear():
     # build model
     with Model() as model:
         lamh = Flat("lamh")
-        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, testval=x)
+        xh = EulerMaruyama("xh", dt, sde, (lamh,), shape=N + 1, initval=x)
         Normal("zh", mu=xh, sigma=sig2, observed=z)
     # invert
     with model:

Diff for: pymc3/tests/test_model.py (+2, -2)

@@ -57,7 +57,7 @@ def __init__(self, mean=0, sigma=1, name="", model=None):
         super().__init__(name, model)
         self.register_rv(Normal.dist(mu=mean, sigma=sigma), "v1")
         Normal("v2", mu=mean, sigma=sigma)
-        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, testval=1.0))
+        Normal("v3", mu=mean, sigma=Normal("sd", mu=10, sigma=1, initval=1.0))
         Deterministic("v3_sq", self.v3 ** 2)
         Potential("p1", at.constant(1))
 
@@ -462,7 +462,7 @@ def test_make_obs_var():
     fake_model = pm.Model()
     with fake_model:
         fake_distribution = pm.Normal.dist(mu=0, sigma=1)
-        # Create the testval attribute simply for the sake of model testing
+        # Create the initval attribute simply for the sake of model testing
        fake_distribution.name = input_name
 
     # Check function behavior using the various inputs

Diff for: pymc3/tests/test_sampling.py (+10, -10)

@@ -387,7 +387,7 @@ def test_shared_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -403,7 +403,7 @@ def test_shared_unnamed(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
             )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -419,7 +419,7 @@ def test_constant_named(self):
                 mu=np.atleast_2d(0),
                 tau=np.atleast_2d(1e20),
                 size=(1, 1),
-                testval=np.atleast_2d(0),
+                initval=np.atleast_2d(0),
            )
             theta = pm.Normal(
                 "theta", mu=at.dot(G_var, theta0), tau=np.atleast_2d(1e20), size=(1, 1)
@@ -688,10 +688,10 @@ def test_deterministic_of_observed_modified_interface(self):
         meas_in_1 = pm.aesaraf.floatX(2 + 4 * rng.randn(100))
         meas_in_2 = pm.aesaraf.floatX(5 + 4 * rng.randn(100))
         with pm.Model(rng_seeder=rng) as model:
-            mu_in_1 = pm.Normal("mu_in_1", 0, 1, testval=0)
-            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, testval=1)
-            mu_in_2 = pm.Normal("mu_in_2", 0, 1, testval=0)
-            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, testval=1)
+            mu_in_1 = pm.Normal("mu_in_1", 0, 1, initval=0)
+            sigma_in_1 = pm.HalfNormal("sd_in_1", 1, initval=1)
+            mu_in_2 = pm.Normal("mu_in_2", 0, 1, initval=0)
+            sigma_in_2 = pm.HalfNormal("sd__in_2", 1, initval=1)
 
             in_1 = pm.Normal("in_1", mu_in_1, sigma_in_1, observed=meas_in_1)
             in_2 = pm.Normal("in_2", mu_in_2, sigma_in_2, observed=meas_in_2)
@@ -882,7 +882,7 @@ def _mocked_init_nuts(*args, **kwargs):
 
 
 @pytest.mark.parametrize(
-    "testval, jitter_max_retries, expectation",
+    "initval, jitter_max_retries, expectation",
     [
         (0, 0, pytest.raises(SamplingError)),
         (0, 1, pytest.raises(SamplingError)),
@@ -891,9 +891,9 @@ def _mocked_init_nuts(*args, **kwargs):
         (1, 0, does_not_raise()),
     ],
 )
-def test_init_jitter(testval, jitter_max_retries, expectation):
+def test_init_jitter(initval, jitter_max_retries, expectation):
     with pm.Model() as m:
-        pm.HalfNormal("x", transform=None, testval=testval)
+        pm.HalfNormal("x", transform=None, initval=initval)
 
     with expectation:
         # Starting value is negative (invalid) when np.random.rand returns 0 (jitter = -1)

Diff for: pymc3/tests/test_step.py (+4, -4)

@@ -964,25 +964,25 @@ def test_multiple_samplers(self, caplog):
 
     def test_bad_init_nonparallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, chains=1, random_seed=1)
             error.match("Initial evaluation")
 
     @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
     def test_bad_init_parallel(self):
         with Model():
-            HalfNormal("a", sigma=1, testval=-1, transform=None)
+            HalfNormal("a", sigma=1, initval=-1, transform=None)
             with pytest.raises(SamplingError) as error:
                 sample(init=None, cores=2, random_seed=1)
             error.match("Initial evaluation")
 
     def test_linalg(self, caplog):
         with Model():
-            a = Normal("a", size=2, testval=floatX(np.zeros(2)))
+            a = Normal("a", size=2, initval=floatX(np.zeros(2)))
             a = at.switch(a > 0, np.inf, a)
             b = at.slinalg.solve(floatX(np.eye(2)), a)
-            Normal("c", mu=b, size=2, testval=floatX(np.r_[0.0, 0.0]))
+            Normal("c", mu=b, size=2, initval=floatX(np.r_[0.0, 0.0]))
             caplog.clear()
             trace = sample(20, init=None, tune=5, chains=2)
             warns = [msg.msg for msg in caplog.records]
