diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index 55e4484dc3..490df563cd 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -17,31 +17,70 @@ A collection of common probability distributions for stochastic nodes in PyMC. """ +import warnings + import numpy as np import theano import theano.tensor as tt -from scipy import stats -from scipy.special import expit -from scipy.interpolate import InterpolatedUnivariateSpline -import warnings -from pymc3.theanof import floatX from . import transforms -from .special import log_i0 -from ..math import invlogit, logit, logdiffexp from .dist_math import ( - alltrue_elemwise, betaln, bound, gammaln, i0e, incomplete_beta, logpow, - normal_lccdf, normal_lcdf, SplineWrapper, std_cdf, zvalue, + alltrue_elemwise, + betaln, + bound, + gammaln, + i0e, + incomplete_beta, + logpow, + normal_lccdf, + normal_lcdf, + SplineWrapper, + std_cdf, + zvalue, clipped_beta_rvs, ) -from .distribution import (Continuous, draw_values, generate_samples) +from .distribution import Continuous, draw_values, generate_samples +from .special import log_i0 +from ..math import invlogit, logit, logdiffexp + +from pymc3.theanof import floatX +from scipy import stats +from scipy.special import expit +from scipy.interpolate import InterpolatedUnivariateSpline -__all__ = ['Uniform', 'Flat', 'HalfFlat', 'Normal', 'TruncatedNormal', 'Beta', - 'Kumaraswamy', 'Exponential', 'Laplace', 'StudentT', 'Cauchy', - 'HalfCauchy', 'Gamma', 'Weibull', 'HalfStudentT', 'Lognormal', - 'ChiSquared', 'HalfNormal', 'Wald', 'Pareto', 'InverseGamma', - 'ExGaussian', 'VonMises', 'SkewNormal', 'Triangular', 'Gumbel', - 'Logistic', 'LogitNormal', 'Interpolated', 'Rice', 'Moyal'] +__all__ = [ + "Uniform", + "Flat", + "HalfFlat", + "Normal", + "TruncatedNormal", + "Beta", + "Kumaraswamy", + "Exponential", + "Laplace", + "StudentT", + "Cauchy", + "HalfCauchy", + "Gamma", + "Weibull", + "HalfStudentT", + "Lognormal", + "ChiSquared", + "HalfNormal", + "Wald", + "Pareto", + "InverseGamma", + "ExGaussian", + "VonMises", + "SkewNormal", + "Triangular", + "Gumbel", + "Logistic", + "LogitNormal", + "Interpolated", + "Rice", + "Moyal", +] class PositiveContinuous(Continuous): @@ -61,13 +100,12 @@ def __init__(self, transform=transforms.logodds, *args, **kwargs): class BoundedContinuous(Continuous): """Base class for bounded continuous distributions""" - def __init__(self, transform='auto', lower=None, upper=None, - *args, **kwargs): + def __init__(self, transform="auto", lower=None, upper=None, *args, **kwargs): lower = tt.as_tensor_variable(lower) if lower is not None else None upper = tt.as_tensor_variable(upper) if upper is not None else None - if transform == 'auto': + if transform == "auto": if lower is None and upper is None: transform = None elif lower is not None and upper is None: @@ -86,8 +124,7 @@ def assert_negative_support(var, label, distname, value=-1e-6): return try: # Transformed distribution - support = np.isfinite(var.transformed.distribution.dist - .logp(value).tag.test_value) + support = np.isfinite(var.transformed.distribution.dist.logp(value).tag.test_value) except AttributeError: try: # Untransformed distribution @@ -98,7 +135,8 @@ def assert_negative_support(var, label, distname, value=-1e-6): if np.any(support): msg = "The variable specified for {0} has negative support for {1}, ".format( - label, distname) + label, distname + ) msg += "likely making it unsuitable for this parameter." 
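+        # Warn rather than raise: the test-value check above is only a
+        # heuristic, which is why the message is phrased as "likely ...
+        # unsuitable" instead of treating negative support as a hard error.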
warnings.warn(msg) @@ -126,27 +164,27 @@ def get_tau_sigma(tau=None, sigma=None): """ if tau is None: if sigma is None: - sigma = 1. - tau = 1. + sigma = 1.0 + tau = 1.0 else: - tau = sigma**-2. + tau = sigma ** -2.0 else: if sigma is not None: raise ValueError("Can't pass both tau and sigma") else: - sigma = tau**-.5 + sigma = tau ** -0.5 # cast tau and sigma to float in a way that works for both np.arrays # and pure python - tau = 1. * tau - sigma = 1. * sigma + tau = 1.0 * tau + sigma = 1.0 * sigma return floatX(tau), floatX(sigma) class Uniform(BoundedContinuous): - R""" + r""" Continuous uniform log-likelihood. The pdf of this distribution is @@ -190,7 +228,7 @@ class Uniform(BoundedContinuous): def __init__(self, lower=0, upper=1, *args, **kwargs): self.lower = lower = tt.as_tensor_variable(floatX(lower)) self.upper = upper = tt.as_tensor_variable(floatX(upper)) - self.mean = (upper + lower) / 2. + self.mean = (upper + lower) / 2.0 self.median = self.mean super().__init__(lower=lower, upper=upper, *args, **kwargs) @@ -213,12 +251,10 @@ def random(self, point=None, size=None): array """ - lower, upper = draw_values([self.lower, self.upper], - point=point, size=size) - return generate_samples(stats.uniform.rvs, loc=lower, - scale=upper - lower, - dist_shape=self.shape, - size=size) + lower, upper = draw_values([self.lower, self.upper], point=point, size=size) + return generate_samples( + stats.uniform.rvs, loc=lower, scale=upper - lower, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -235,8 +271,7 @@ def logp(self, value): """ lower = self.lower upper = self.upper - return bound(-tt.log(upper - lower), - value >= lower, value <= upper) + return bound(-tt.log(upper - lower), value >= lower, value <= upper) def logcdf(self, value): """ @@ -259,9 +294,8 @@ def logcdf(self, value): tt.switch( tt.eq(value, self.upper), 0, - tt.log((value - self.lower)) - - tt.log((self.upper - self.lower)) - ) + tt.log((value - self.lower)) - tt.log((self.upper - self.lower)), + ), ) @@ -273,7 +307,7 @@ class Flat(Continuous): def __init__(self, *args, **kwargs): self._default = 0 - super().__init__(defaults=('_default',), *args, **kwargs) + super().__init__(defaults=("_default",), *args, **kwargs) def random(self, point=None, size=None): """Raises ValueError as it is not possible to sample from Flat distribution @@ -287,7 +321,7 @@ def random(self, point=None, size=None): ------- ValueError """ - raise ValueError('Cannot sample from Flat distribution') + raise ValueError("Cannot sample from Flat distribution") def logp(self, value): """ @@ -321,13 +355,7 @@ def logcdf(self, value): TensorVariable """ return tt.switch( - tt.eq(value, -np.inf), - -np.inf, - tt.switch( - tt.eq(value, np.inf), - 0, - tt.log(0.5) - ) + tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) ) @@ -336,7 +364,7 @@ class HalfFlat(PositiveContinuous): def __init__(self, *args, **kwargs): self._default = 1 - super().__init__(defaults=('_default',), *args, **kwargs) + super().__init__(defaults=("_default",), *args, **kwargs) def random(self, point=None, size=None): """Raises ValueError as it is not possible to sample from HalfFlat distribution @@ -350,7 +378,7 @@ def random(self, point=None, size=None): ------- ValueError """ - raise ValueError('Cannot sample from HalfFlat distribution') + raise ValueError("Cannot sample from HalfFlat distribution") def logp(self, value): """ @@ -383,19 +411,11 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.switch( - tt.lt(value, 
np.inf), - -np.inf, - tt.switch( - tt.eq(value, np.inf), - 0, - -np.inf - ) - ) + return tt.switch(tt.lt(value, np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, -np.inf)) class Normal(Continuous): - R""" + r""" Univariate normal log-likelihood. The pdf of this distribution is @@ -460,19 +480,16 @@ class Normal(Continuous): def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) - self.variance = 1. / self.tau + self.variance = 1.0 / self.tau - assert_negative_support(sigma, 'sigma', 'Normal') - assert_negative_support(tau, 'tau', 'Normal') + assert_negative_support(sigma, "sigma", "Normal") + assert_negative_support(tau, "tau", "Normal") super().__init__(**kwargs) @@ -493,11 +510,10 @@ def random(self, point=None, size=None): ------- array """ - mu, tau, _ = draw_values([self.mu, self.tau, self.sigma], - point=point, size=size) - return generate_samples(stats.norm.rvs, loc=mu, scale=tau**-0.5, - dist_shape=self.shape, - size=size) + mu, tau, _ = draw_values([self.mu, self.tau, self.sigma], point=point, size=size) + return generate_samples( + stats.norm.rvs, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -517,8 +533,7 @@ def logp(self, value): tau = self.tau mu = self.mu - return bound((-tau * (value - mu)**2 + tt.log(tau / np.pi / 2.)) / 2., - sigma > 0) + return bound((-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, sigma > 0) def _distr_parameters_for_repr(self): return ["mu", "sigma"] @@ -542,7 +557,7 @@ def logcdf(self, value): class TruncatedNormal(BoundedContinuous): - R""" + r""" Univariate truncated normal log-likelihood. 
The pdf of this distribution is @@ -613,37 +628,58 @@ class TruncatedNormal(BoundedContinuous): """ - def __init__(self, mu=0, sigma=None, tau=None, lower=None, upper=None, - transform='auto', sd=None, *args, **kwargs): + def __init__( + self, + mu=0, + sigma=None, + tau=None, + lower=None, + upper=None, + transform="auto", + sd=None, + *args, + **kwargs + ): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) self.lower_check = tt.as_tensor_variable(floatX(lower)) if lower is not None else lower self.upper_check = tt.as_tensor_variable(floatX(upper)) if upper is not None else upper - self.lower = tt.as_tensor_variable(floatX(lower)) if lower is not None else tt.as_tensor_variable(-np.inf) - self.upper = tt.as_tensor_variable(floatX(upper)) if upper is not None else tt.as_tensor_variable(np.inf) + self.lower = ( + tt.as_tensor_variable(floatX(lower)) + if lower is not None + else tt.as_tensor_variable(-np.inf) + ) + self.upper = ( + tt.as_tensor_variable(floatX(upper)) + if upper is not None + else tt.as_tensor_variable(np.inf) + ) self.mu = tt.as_tensor_variable(floatX(mu)) if self.lower_check is None and self.upper_check is None: self._defaultval = mu elif self.lower_check is None and self.upper_check is not None: - self._defaultval = self.upper - 1. + self._defaultval = self.upper - 1.0 elif self.lower_check is not None and self.upper_check is None: - self._defaultval = self.lower + 1. + self._defaultval = self.lower + 1.0 else: self._defaultval = (self.lower + self.upper) / 2 - assert_negative_support(sigma, 'sigma', 'TruncatedNormal') - assert_negative_support(tau, 'tau', 'TruncatedNormal') + assert_negative_support(sigma, "sigma", "TruncatedNormal") + assert_negative_support(tau, "tau", "TruncatedNormal") - super().__init__(defaults=('_defaultval',), transform=transform, - lower=lower, upper=upper, *args, **kwargs) + super().__init__( + defaults=("_defaultval",), + transform=transform, + lower=lower, + upper=upper, + *args, + **kwargs + ) def random(self, point=None, size=None): """ @@ -663,9 +699,7 @@ def random(self, point=None, size=None): array """ mu, sigma, lower, upper = draw_values( - [self.mu, self.sigma, self.lower, self.upper], - point=point, - size=size + [self.mu, self.sigma, self.lower, self.upper], point=point, size=size ) return generate_samples( self._random, @@ -684,11 +718,7 @@ def _random(self, mu, sigma, lower, upper, size): the scipy.rvs representation. """ return stats.truncnorm.rvs( - a=(lower - mu) / sigma, - b=(upper - mu) / sigma, - loc=mu, - scale=sigma, - size=size, + a=(lower - mu) / sigma, b=(upper - mu) / sigma, loc=mu, scale=sigma, size=size ) def logp(self, value): @@ -722,7 +752,7 @@ def _normalization(self): mu, sigma = self.mu, self.sigma if self.lower_check is None and self.upper_check is None: - return 0. 
+ return 0.0 if self.lower_check is not None and self.upper_check is not None: lcdf_a = normal_lcdf(mu, sigma, self.lower) @@ -730,11 +760,7 @@ def _normalization(self): lsf_a = normal_lccdf(mu, sigma, self.lower) lsf_b = normal_lccdf(mu, sigma, self.upper) - return tt.switch( - self.lower > 0, - logdiffexp(lsf_a, lsf_b), - logdiffexp(lcdf_b, lcdf_a), - ) + return tt.switch(self.lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a)) if self.lower_check is not None: return normal_lccdf(mu, sigma, self.lower) @@ -746,7 +772,7 @@ def _distr_parameters_for_repr(self): class HalfNormal(PositiveContinuous): - R""" + r""" Half-normal log-likelihood. The pdf of this distribution is @@ -811,10 +837,7 @@ class HalfNormal(PositiveContinuous): def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) super().__init__(*args, **kwargs) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) @@ -822,10 +845,10 @@ def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs): self.tau = tau = tt.as_tensor_variable(tau) self.mean = tt.sqrt(2 / (np.pi * self.tau)) - self.variance = (1. - 2 / np.pi) / self.tau + self.variance = (1.0 - 2 / np.pi) / self.tau - assert_negative_support(tau, 'tau', 'HalfNormal') - assert_negative_support(sigma, 'sigma', 'HalfNormal') + assert_negative_support(tau, "tau", "HalfNormal") + assert_negative_support(sigma, "sigma", "HalfNormal") def random(self, point=None, size=None): """ @@ -845,9 +868,9 @@ def random(self, point=None, size=None): array """ sigma = draw_values([self.sigma], point=point, size=size)[0] - return generate_samples(stats.halfnorm.rvs, loc=0., scale=sigma, - dist_shape=self.shape, - size=size) + return generate_samples( + stats.halfnorm.rvs, loc=0.0, scale=sigma, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -865,9 +888,12 @@ def logp(self, value): """ tau = self.tau sigma = self.sigma - return bound(-0.5 * tau * value**2 + 0.5 * tt.log(tau * 2. / np.pi), - value >= 0, - tau > 0, sigma > 0) + return bound( + -0.5 * tau * value ** 2 + 0.5 * tt.log(tau * 2.0 / np.pi), + value >= 0, + tau > 0, + sigma > 0, + ) def _distr_parameters_for_repr(self): return ["sigma"] @@ -891,13 +917,13 @@ def logcdf(self, value): z = zvalue(value, mu=0, sigma=sigma) return tt.switch( tt.lt(z, -1.0), - tt.log(tt.erfcx(-z / tt.sqrt(2.))) - tt.sqr(z), - tt.log1p(-tt.erfc(z / tt.sqrt(2.))) + tt.log(tt.erfcx(-z / tt.sqrt(2.0))) - tt.sqr(z), + tt.log1p(-tt.erfc(z / tt.sqrt(2.0))), ) class Wald(PositiveContinuous): - R""" + r""" Wald log-likelihood. The pdf of this distribution is @@ -974,7 +1000,7 @@ class Wald(PositiveContinuous): statmod: Probability Calculations for the Inverse Gaussian Distribution """ - def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs): + def __init__(self, mu=None, lam=None, phi=None, alpha=0.0, *args, **kwargs): super().__init__(*args, **kwargs) mu, lam, phi = self.get_mu_lam_phi(mu, lam, phi) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) @@ -983,13 +1009,15 @@ def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs): self.phi = phi = tt.as_tensor_variable(floatX(phi)) self.mean = self.mu + self.alpha - self.mode = self.mu * (tt.sqrt(1. 
+ (1.5 * self.mu / self.lam)**2) - - 1.5 * self.mu / self.lam) + self.alpha - self.variance = (self.mu**3) / self.lam + self.mode = ( + self.mu * (tt.sqrt(1.0 + (1.5 * self.mu / self.lam) ** 2) - 1.5 * self.mu / self.lam) + + self.alpha + ) + self.variance = (self.mu ** 3) / self.lam - assert_negative_support(phi, 'phi', 'Wald') - assert_negative_support(mu, 'mu', 'Wald') - assert_negative_support(lam, 'lam', 'Wald') + assert_negative_support(phi, "phi", "Wald") + assert_negative_support(mu, "mu", "Wald") + assert_negative_support(lam, "lam", "Wald") def get_mu_lam_phi(self, mu, lam, phi): if mu is None: @@ -998,23 +1026,28 @@ def get_mu_lam_phi(self, mu, lam, phi): else: if lam is None: if phi is None: - return mu, 1., 1. / mu + return mu, 1.0, 1.0 / mu else: return mu, mu * phi, phi else: if phi is None: return mu, lam, lam / mu - raise ValueError('Wald distribution must specify either mu only, ' - 'mu and lam, mu and phi, or lam and phi.') + raise ValueError( + "Wald distribution must specify either mu only, " + "mu and lam, mu and phi, or lam and phi." + ) def _random(self, mu, lam, alpha, size=None): - v = np.random.normal(size=size)**2 - value = (mu + (mu**2) * v / (2. * lam) - mu / (2. * lam) - * np.sqrt(4. * mu * lam * v + (mu * v)**2)) + v = np.random.normal(size=size) ** 2 + value = ( + mu + + (mu ** 2) * v / (2.0 * lam) + - mu / (2.0 * lam) * np.sqrt(4.0 * mu * lam * v + (mu * v) ** 2) + ) z = np.random.uniform(size=size) i = np.floor(z - mu / (mu + value)) * 2 + 1 - value = (value**-i) * (mu**(i + 1)) + value = (value ** -i) * (mu ** (i + 1)) return value + alpha def random(self, point=None, size=None): @@ -1034,12 +1067,8 @@ def random(self, point=None, size=None): ------- array """ - mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha], - point=point, size=size) - return generate_samples(self._random, - mu, lam, alpha, - dist_shape=self.shape, - size=size) + mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha], point=point, size=size) + return generate_samples(self._random, mu, lam, alpha, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1058,14 +1087,19 @@ def logp(self, value): mu = self.mu lam = self.lam alpha = self.alpha + centered_value = value - alpha # value *must* be iid. Otherwise this is wrong. - return bound(logpow(lam / (2. * np.pi), 0.5) - - logpow(value - alpha, 1.5) - - (0.5 * lam / (value - alpha) - * ((value - alpha - mu) / mu)**2), - # XXX these two are redundant. Please, check. - value > 0, value - alpha > 0, - mu > 0, lam > 0, alpha >= 0) + return bound( + logpow(lam / (2.0 * np.pi), 0.5) + - logpow(centered_value, 1.5) + - (0.5 * lam / centered_value * ((centered_value - mu) / mu) ** 2), + # XXX these two are redundant. Please, check. 
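+            # `value > 0` follows from `centered_value > 0` together with
+            # `alpha >= 0` (value = centered_value + alpha), so the first
+            # check is redundant but harmless.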
+ value > 0, + centered_value > 0, + mu > 0, + lam > 0, + alpha >= 0, + ) def _distr_parameters_for_repr(self): return ["mu", "lam", "alpha"] @@ -1095,38 +1129,32 @@ def logcdf(self, value): l = lam * mu r = tt.sqrt(value * lam) - a = normal_lcdf(0, 1, (q - 1.)/r) - b = 2./l + normal_lcdf(0, 1, -(q + 1.)/r) + a = normal_lcdf(0, 1, (q - 1.0) / r) + b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r) + + left_limit = ( + tt.lt(value, 0) + | (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) + | (tt.lt(value, mu) & tt.eq(lam, 0)) + ) + right_limit = ( + tt.eq(value, np.inf) + | (tt.eq(lam, 0) & tt.gt(value, mu)) + | (tt.gt(value, 0) & tt.eq(lam, np.inf)) + ) + degenerate_dist = (tt.lt(mu, np.inf) & tt.eq(mu, value) & tt.eq(lam, 0)) | ( + tt.eq(value, 0) & tt.eq(lam, np.inf) + ) + return tt.switch( - ( - # Left limit - tt.lt(value, 0) | - (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) | - (tt.lt(value, mu) & tt.eq(lam, 0)) - ), + left_limit, -np.inf, - tt.switch( - ( - # Right limit - tt.eq(value, np.inf) | - (tt.eq(lam, 0) & tt.gt(value, mu)) | - (tt.gt(value, 0) & tt.eq(lam, np.inf)) | - # Degenerate distribution - ( - tt.lt(mu, np.inf) & - tt.eq(mu, value) & - tt.eq(lam, 0) - ) | - (tt.eq(value, 0) & tt.eq(lam, np.inf)) - ), - 0, - a + tt.log1p(tt.exp(b - a)) - ) + tt.switch((right_limit | degenerate_dist), 0, a + tt.log1p(tt.exp(b - a))), ) class Beta(UnitContinuous): - R""" + r""" Beta log-likelihood. The pdf of this distribution is @@ -1188,36 +1216,35 @@ class Beta(UnitContinuous): the binomial distribution. """ - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, - sd=None, *args, **kwargs): + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = self.alpha / (self.alpha + self.beta) - self.variance = self.alpha * self.beta / ( - (self.alpha + self.beta)**2 * (self.alpha + self.beta + 1)) + self.variance = ( + self.alpha * self.beta / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1)) + ) - assert_negative_support(alpha, 'alpha', 'Beta') - assert_negative_support(beta, 'beta', 'Beta') + assert_negative_support(alpha, "alpha", "Beta") + assert_negative_support(beta, "beta", "Beta") def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None): if (alpha is not None) and (beta is not None): pass elif (mu is not None) and (sigma is not None): - kappa = mu * (1 - mu) / sigma**2 - 1 + kappa = mu * (1 - mu) / sigma ** 2 - 1 alpha = mu * kappa beta = (1 - mu) * kappa else: - raise ValueError('Incompatible parameterization. Either use alpha ' - 'and beta, or mu and sigma to specify distribution.') + raise ValueError( + "Incompatible parameterization. Either use alpha " + "and beta, or mu and sigma to specify distribution." 
+ ) return alpha, beta @@ -1238,11 +1265,8 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(clipped_beta_rvs, alpha, beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples(clipped_beta_rvs, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1263,13 +1287,13 @@ def logp(self, value): logval = tt.log(value) log1pval = tt.log1p(-value) - logp = (tt.switch(tt.eq(alpha, 1), 0, (alpha - 1) * logval) - + tt.switch(tt.eq(beta, 1), 0, (beta - 1) * log1pval) - - betaln(alpha, beta)) + logp = ( + tt.switch(tt.eq(alpha, 1), 0, (alpha - 1) * logval) + + tt.switch(tt.eq(beta, 1), 0, (beta - 1) * log1pval) + - betaln(alpha, beta) + ) - return bound(logp, - value >= 0, value <= 1, - alpha > 0, beta > 0) + return bound(logp, value >= 0, value <= 1, alpha > 0, beta > 0) def logcdf(self, value): """ @@ -1292,18 +1316,15 @@ def logcdf(self, value): return tt.switch( tt.le(value, 0), -np.inf, - tt.switch( - tt.ge(value, 1), - 0, - tt.log(incomplete_beta(a, b, value)) - ) + tt.switch(tt.ge(value, 1), 0, tt.log(incomplete_beta(a, b, value))), ) def _distr_parameters_for_repr(self): return ["alpha", "beta"] + class Kumaraswamy(UnitContinuous): - R""" + r""" Kumaraswamy log-likelihood. The pdf of this distribution is @@ -1352,11 +1373,13 @@ def __init__(self, a, b, *args, **kwargs): ln_mean = tt.log(b) + tt.gammaln(1 + 1 / a) + tt.gammaln(b) - tt.gammaln(1 + 1 / a + b) self.mean = tt.exp(ln_mean) - ln_2nd_raw_moment = tt.log(b) + tt.gammaln(1 + 2 / a) + tt.gammaln(b) - tt.gammaln(1 + 2 / a + b) + ln_2nd_raw_moment = ( + tt.log(b) + tt.gammaln(1 + 2 / a) + tt.gammaln(b) - tt.gammaln(1 + 2 / a + b) + ) self.variance = tt.exp(ln_2nd_raw_moment) - self.mean ** 2 - assert_negative_support(a, 'a', 'Kumaraswamy') - assert_negative_support(b, 'b', 'Kumaraswamy') + assert_negative_support(a, "a", "Kumaraswamy") + assert_negative_support(b, "b", "Kumaraswamy") def _random(self, a, b, size=None): u = np.random.uniform(size=size) @@ -1379,11 +1402,8 @@ def random(self, point=None, size=None): ------- array """ - a, b = draw_values([self.a, self.b], - point=point, size=size) - return generate_samples(self._random, a, b, - dist_shape=self.shape, - size=size) + a, b = draw_values([self.a, self.b], point=point, size=size) + return generate_samples(self._random, a, b, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1404,13 +1424,11 @@ def logp(self, value): logp = tt.log(a) + tt.log(b) + (a - 1) * tt.log(value) + (b - 1) * tt.log(1 - value ** a) - return bound(logp, - value >= 0, value <= 1, - a > 0, b > 0) + return bound(logp, value >= 0, value <= 1, a > 0, b > 0) class Exponential(PositiveContinuous): - R""" + r""" Exponential log-likelihood. The pdf of this distribution is @@ -1449,13 +1467,13 @@ class Exponential(PositiveContinuous): def __init__(self, lam, *args, **kwargs): super().__init__(*args, **kwargs) self.lam = lam = tt.as_tensor_variable(floatX(lam)) - self.mean = 1. 
/ self.lam + self.mean = 1.0 / self.lam self.median = self.mean * tt.log(2) self.mode = tt.zeros_like(self.lam) - self.variance = self.lam**-2 + self.variance = self.lam ** -2 - assert_negative_support(lam, 'lam', 'Exponential') + assert_negative_support(lam, "lam", "Exponential") def random(self, point=None, size=None): """ @@ -1475,9 +1493,9 @@ def random(self, point=None, size=None): array """ lam = draw_values([self.lam], point=point, size=size)[0] - return generate_samples(np.random.exponential, scale=1. / lam, - dist_shape=self.shape, - size=size) + return generate_samples( + np.random.exponential, scale=1.0 / lam, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1523,16 +1541,12 @@ def logcdf(self, value): return tt.switch( tt.le(value, 0.0), -np.inf, - tt.switch( - tt.le(a, tt.log(2.0)), - tt.log(-tt.expm1(-a)), - tt.log1p(-tt.exp(-a)), - ) + tt.switch(tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a))), ) class Laplace(Continuous): - R""" + r""" Laplace log-likelihood. The pdf of this distribution is @@ -1578,9 +1592,9 @@ def __init__(self, mu, b, *args, **kwargs): self.b = b = tt.as_tensor_variable(floatX(b)) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) - self.variance = 2 * self.b**2 + self.variance = 2 * self.b ** 2 - assert_negative_support(b, 'b', 'Laplace') + assert_negative_support(b, "b", "Laplace") def random(self, point=None, size=None): """ @@ -1600,9 +1614,7 @@ def random(self, point=None, size=None): array """ mu, b = draw_values([self.mu, self.b], point=point, size=size) - return generate_samples(np.random.laplace, mu, b, - dist_shape=self.shape, - size=size) + return generate_samples(np.random.laplace, mu, b, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1644,16 +1656,12 @@ def logcdf(self, value): return tt.switch( tt.le(value, a), tt.log(0.5) + y, - tt.switch( - tt.gt(y, 1), - tt.log1p(-0.5 * tt.exp(-y)), - tt.log(1 - 0.5 * tt.exp(-y)) - ) + tt.switch(tt.gt(y, 1), tt.log1p(-0.5 * tt.exp(-y)), tt.log(1 - 0.5 * tt.exp(-y))), ) class Lognormal(PositiveContinuous): - R""" + r""" Log-normal log-likelihood. Distribution of any random variable whose logarithm is normally @@ -1718,10 +1726,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) @@ -1729,17 +1734,17 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs): self.tau = tau = tt.as_tensor_variable(tau) self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma) - self.mean = tt.exp(self.mu + 1. / (2 * self.tau)) + self.mean = tt.exp(self.mu + 1.0 / (2 * self.tau)) self.median = tt.exp(self.mu) - self.mode = tt.exp(self.mu - 1. / self.tau) - self.variance = (tt.exp(1. / self.tau) - 1) * tt.exp(2 * self.mu + 1. 
/ self.tau) + self.mode = tt.exp(self.mu - 1.0 / self.tau) + self.variance = (tt.exp(1.0 / self.tau) - 1) * tt.exp(2 * self.mu + 1.0 / self.tau) - assert_negative_support(tau, 'tau', 'Lognormal') - assert_negative_support(sigma, 'sigma', 'Lognormal') + assert_negative_support(tau, "tau", "Lognormal") + assert_negative_support(sigma, "sigma", "Lognormal") def _random(self, mu, tau, size=None): samples = np.random.normal(size=size) - return np.exp(mu + (tau**-0.5) * samples) + return np.exp(mu + (tau ** -0.5) * samples) def random(self, point=None, size=None): """ @@ -1759,9 +1764,7 @@ def random(self, point=None, size=None): array """ mu, tau = draw_values([self.mu, self.tau], point=point, size=size) - return generate_samples(self._random, mu, tau, - dist_shape=self.shape, - size=size) + return generate_samples(self._random, mu, tau, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1779,10 +1782,12 @@ def logp(self, value): """ mu = self.mu tau = self.tau - return bound(-0.5 * tau * (tt.log(value) - mu)**2 - + 0.5 * tt.log(tau / (2. * np.pi)) - - tt.log(value), - tau > 0) + return bound( + -0.5 * tau * (tt.log(value) - mu) ** 2 + + 0.5 * tt.log(tau / (2.0 * np.pi)) + - tt.log(value), + tau > 0, + ) def _distr_parameters_for_repr(self): return ["mu", "tau"] @@ -1811,15 +1816,14 @@ def logcdf(self, value): -np.inf, tt.switch( tt.lt(z, -1.0), - tt.log(tt.erfcx(-z / tt.sqrt(2.)) / 2.) - - tt.sqr(z) / 2, - tt.log1p(-tt.erfc(z / tt.sqrt(2.)) / 2.) - ) + tt.log(tt.erfcx(-z / tt.sqrt(2.0)) / 2.0) - tt.sqr(z) / 2, + tt.log1p(-tt.erfc(z / tt.sqrt(2.0)) / 2.0), + ), ) class StudentT(Continuous): - R""" + r""" Student's T log-likelihood. Describes a normal variable whose precision is gamma distributed. @@ -1886,22 +1890,17 @@ def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs): super(StudentT, self).__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.nu = nu = tt.as_tensor_variable(floatX(nu)) lam, sigma = get_tau_sigma(tau=lam, sigma=sigma) self.lam = lam = tt.as_tensor_variable(lam) self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu) - self.variance = tt.switch((nu > 2) * 1, - (1 / self.lam) * (nu / (nu - 2)), - np.inf) + self.variance = tt.switch((nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf) - assert_negative_support(lam, 'lam (sigma)', 'StudentT') - assert_negative_support(nu, 'nu', 'StudentT') + assert_negative_support(lam, "lam (sigma)", "StudentT") + assert_negative_support(nu, "nu", "StudentT") def random(self, point=None, size=None): """ @@ -1920,11 +1919,10 @@ def random(self, point=None, size=None): ------- array """ - nu, mu, lam = draw_values([self.nu, self.mu, self.lam], - point=point, size=size) - return generate_samples(stats.t.rvs, nu, loc=mu, scale=lam**-0.5, - dist_shape=self.shape, - size=size) + nu, mu, lam = draw_values([self.nu, self.mu, self.lam], point=point, size=size) + return generate_samples( + stats.t.rvs, nu, loc=mu, scale=lam ** -0.5, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1945,11 +1943,15 @@ def logp(self, value): lam = self.lam sigma = self.sigma - return bound(gammaln((nu + 1.0) / 2.0) - + .5 * tt.log(lam / (nu * np.pi)) - - gammaln(nu / 2.0) - - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu)**2 / nu), - lam > 0, nu > 0, sigma > 0) + return 
bound( + gammaln((nu + 1.0) / 2.0) + + 0.5 * tt.log(lam / (nu * np.pi)) + - gammaln(nu / 2.0) + - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu) ** 2 / nu), + lam > 0, + nu > 0, + sigma > 0, + ) def _distr_parameters_for_repr(self): return ["nu", "mu", "lam"] @@ -1972,14 +1974,14 @@ def logcdf(self, value): nu = self.nu mu = self.mu sigma = self.sigma - t = (value - mu)/sigma - sqrt_t2_nu = tt.sqrt(t**2 + nu) - x = (t + sqrt_t2_nu)/(2.0 * sqrt_t2_nu) - return tt.log(incomplete_beta(nu/2., nu/2., x)) + t = (value - mu) / sigma + sqrt_t2_nu = tt.sqrt(t ** 2 + nu) + x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu) + return tt.log(incomplete_beta(nu / 2.0, nu / 2.0, x)) class Pareto(Continuous): - R""" + r""" Pareto log-likelihood. Often used to characterize wealth distribution, or other examples of the @@ -2023,28 +2025,26 @@ class Pareto(Continuous): Scale parameter (m > 0). """ - def __init__(self, alpha, m, transform='lowerbound', *args, **kwargs): + def __init__(self, alpha, m, transform="lowerbound", *args, **kwargs): self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.m = m = tt.as_tensor_variable(floatX(m)) - self.mean = tt.switch(tt.gt(alpha, 1), alpha * - m / (alpha - 1.), np.inf) - self.median = m * 2.**(1. / alpha) + self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf) + self.median = m * 2.0 ** (1.0 / alpha) self.variance = tt.switch( - tt.gt(alpha, 2), - (alpha * m**2) / ((alpha - 2.) * (alpha - 1.)**2), - np.inf) + tt.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf + ) - assert_negative_support(alpha, 'alpha', 'Pareto') - assert_negative_support(m, 'm', 'Pareto') + assert_negative_support(alpha, "alpha", "Pareto") + assert_negative_support(m, "m", "Pareto") - if transform == 'lowerbound': + if transform == "lowerbound": transform = transforms.lowerbound(self.m) super().__init__(transform=transform, *args, **kwargs) def _random(self, alpha, m, size=None): u = np.random.uniform(size=size) - return m * (1. - u)**(-1. / alpha) + return m * (1.0 - u) ** (-1.0 / alpha) def random(self, point=None, size=None): """ @@ -2063,11 +2063,8 @@ def random(self, point=None, size=None): ------- array """ - alpha, m = draw_values([self.alpha, self.m], - point=point, size=size) - return generate_samples(self._random, alpha, m, - dist_shape=self.shape, - size=size) + alpha, m = draw_values([self.alpha, self.m], point=point, size=size) + return generate_samples(self._random, alpha, m, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2085,9 +2082,12 @@ def logp(self, value): """ alpha = self.alpha m = self.m - return bound(tt.log(alpha) + logpow(m, alpha) - - logpow(value, alpha + 1), - value >= m, alpha > 0, m > 0) + return bound( + tt.log(alpha) + logpow(m, alpha) - logpow(value, alpha + 1), + value >= m, + alpha > 0, + m > 0, + ) def _distr_parameters_for_repr(self): return ["alpha", "m"] @@ -2111,18 +2111,12 @@ def logcdf(self, value): alpha = self.alpha arg = (m / value) ** alpha return tt.switch( - tt.lt(value, m), - -np.inf, - tt.switch( - tt.le(arg, 1e-5), - tt.log1p(-arg), - tt.log(1 - arg) - ) + tt.lt(value, m), -np.inf, tt.switch(tt.le(arg, 1e-5), tt.log1p(-arg), tt.log(1 - arg)) ) class Cauchy(Continuous): - R""" + r""" Cauchy log-likelihood. Also known as the Lorentz or the Breit-Wigner distribution. 
@@ -2171,7 +2165,7 @@ def __init__(self, alpha, beta, *args, **kwargs): self.median = self.mode = self.alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'Cauchy') + assert_negative_support(beta, "beta", "Cauchy") def _random(self, alpha, beta, size=None): u = np.random.uniform(size=size) @@ -2194,11 +2188,8 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(self._random, alpha, beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples(self._random, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2216,9 +2207,9 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(- tt.log(np.pi) - tt.log(beta) - - tt.log1p(((value - alpha) / beta)**2), - beta > 0) + return bound( + -tt.log(np.pi) - tt.log(beta) - tt.log1p(((value - alpha) / beta) ** 2), beta > 0 + ) def logcdf(self, value): """ @@ -2235,13 +2226,11 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.log( - 0.5 + tt.arctan((value - self.alpha) / self.beta) / np.pi - ) + return tt.log(0.5 + tt.arctan((value - self.alpha) / self.beta) / np.pi) class HalfCauchy(PositiveContinuous): - R""" + r""" Half-Cauchy log-likelihood. The pdf of this distribution is @@ -2283,7 +2272,7 @@ def __init__(self, beta, *args, **kwargs): self.mode = tt.as_tensor_variable(0) self.median = self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'HalfCauchy') + assert_negative_support(beta, "beta", "HalfCauchy") def _random(self, beta, size=None): u = np.random.uniform(size=size) @@ -2307,9 +2296,7 @@ def random(self, point=None, size=None): array """ beta = draw_values([self.beta], point=point, size=size)[0] - return generate_samples(self._random, beta, - dist_shape=self.shape, - size=size) + return generate_samples(self._random, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2326,9 +2313,11 @@ def logp(self, value): TensorVariable """ beta = self.beta - return bound(tt.log(2) - tt.log(np.pi) - tt.log(beta) - - tt.log1p((value / beta)**2), - value >= 0, beta > 0) + return bound( + tt.log(2) - tt.log(np.pi) - tt.log(beta) - tt.log1p((value / beta) ** 2), + value >= 0, + beta > 0, + ) def logcdf(self, value): """ @@ -2345,16 +2334,11 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.switch( - tt.le(value, 0), - -np.inf, - tt.log( - 2 * tt.arctan(value / self.beta) / np.pi - )) + return tt.switch(tt.le(value, 0), -np.inf, tt.log(2 * tt.arctan(value / self.beta) / np.pi)) class Gamma(PositiveContinuous): - R""" + r""" Gamma log-likelihood. Represents the sum of alpha exponentially distributed random variables, @@ -2411,36 +2395,34 @@ class Gamma(PositiveContinuous): Alternative scale parameter (sigma > 0). 
""" - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, - sd=None, *args, **kwargs): + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = alpha / beta self.mode = tt.maximum((alpha - 1) / beta, 0) - self.variance = alpha / beta**2 + self.variance = alpha / beta ** 2 - assert_negative_support(alpha, 'alpha', 'Gamma') - assert_negative_support(beta, 'beta', 'Gamma') + assert_negative_support(alpha, "alpha", "Gamma") + assert_negative_support(beta, "beta", "Gamma") def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None): if (alpha is not None) and (beta is not None): pass elif (mu is not None) and (sigma is not None): - alpha = mu**2 / sigma**2 - beta = mu / sigma**2 + alpha = mu ** 2 / sigma ** 2 + beta = mu / sigma ** 2 else: - raise ValueError('Incompatible parameterization. Either use ' - 'alpha and beta, or mu and sigma to specify ' - 'distribution.') + raise ValueError( + "Incompatible parameterization. Either use " + "alpha and beta, or mu and sigma to specify " + "distribution." + ) return alpha, beta @@ -2461,11 +2443,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(stats.gamma.rvs, alpha, scale=1. / beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + stats.gamma.rvs, alpha, scale=1.0 / beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2484,11 +2465,11 @@ def logp(self, value): alpha = self.alpha beta = self.beta return bound( - -gammaln(alpha) + logpow( - beta, alpha) - beta * value + logpow(value, alpha - 1), + -gammaln(alpha) + logpow(beta, alpha) - beta * value + logpow(value, alpha - 1), value >= 0, alpha > 0, - beta > 0) + beta > 0, + ) def logcdf(self, value): """ @@ -2507,18 +2488,14 @@ def logcdf(self, value): """ alpha = self.alpha beta = self.beta - return bound( - tt.log(tt.gammainc(alpha, beta * value)), - value >= 0, - alpha > 0, - beta > 0) + return bound(tt.log(tt.gammainc(alpha, beta * value)), value >= 0, alpha > 0, beta > 0) def _distr_parameters_for_repr(self): return ["alpha", "beta"] class InverseGamma(PositiveContinuous): - R""" + r""" Inverse gamma log-likelihood, the reciprocal of the gamma distribution. The pdf of this distribution is @@ -2565,31 +2542,27 @@ class InverseGamma(PositiveContinuous): Alternative scale parameter (sigma > 0). 
""" - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, - *args, **kwargs): - super().__init__(*args, defaults=('mode',), **kwargs) + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): + super().__init__(*args, defaults=("mode",), **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = InverseGamma._get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = self._calculate_mean() - self.mode = beta / (alpha + 1.) - self.variance = tt.switch(tt.gt(alpha, 2), - (beta**2) / ((alpha - 2) * (alpha - 1.)**2), - np.inf) - assert_negative_support(alpha, 'alpha', 'InverseGamma') - assert_negative_support(beta, 'beta', 'InverseGamma') + self.mode = beta / (alpha + 1.0) + self.variance = tt.switch( + tt.gt(alpha, 2), (beta ** 2) / ((alpha - 2) * (alpha - 1.0) ** 2), np.inf + ) + assert_negative_support(alpha, "alpha", "InverseGamma") + assert_negative_support(beta, "beta", "InverseGamma") def _calculate_mean(self): - m = self.beta / (self.alpha - 1.) + m = self.beta / (self.alpha - 1.0) try: return (self.alpha > 1) * m or np.inf except ValueError: # alpha is an array @@ -2598,18 +2571,20 @@ def _calculate_mean(self): @staticmethod def _get_alpha_beta(alpha, beta, mu, sigma): - if (alpha is not None): - if (beta is not None): + if alpha is not None: + if beta is not None: pass else: beta = 1 elif (mu is not None) and (sigma is not None): - alpha = (2 * sigma**2 + mu**2)/sigma**2 - beta = mu * (mu**2 + sigma**2) / sigma**2 + alpha = (2 * sigma ** 2 + mu ** 2) / sigma ** 2 + beta = mu * (mu ** 2 + sigma ** 2) / sigma ** 2 else: - raise ValueError('Incompatible parameterization. Either use ' - 'alpha and (optionally) beta, or mu and sigma to specify ' - 'distribution.') + raise ValueError( + "Incompatible parameterization. Either use " + "alpha and (optionally) beta, or mu and sigma to specify " + "distribution." + ) return alpha, beta @@ -2630,11 +2605,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(stats.invgamma.rvs, a=alpha, scale=beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + stats.invgamma.rvs, a=alpha, scale=beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2652,16 +2626,19 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(logpow(beta, alpha) - gammaln(alpha) - beta / value - + logpow(value, -alpha - 1), - value > 0, alpha > 0, beta > 0) + return bound( + logpow(beta, alpha) - gammaln(alpha) - beta / value + logpow(value, -alpha - 1), + value > 0, + alpha > 0, + beta > 0, + ) def _distr_parameters_for_repr(self): return ["alpha", "beta"] class ChiSquared(Gamma): - R""" + r""" :math:`\chi^2` log-likelihood. The pdf of this distribution is @@ -2700,11 +2677,11 @@ class ChiSquared(Gamma): def __init__(self, nu, *args, **kwargs): self.nu = nu = tt.as_tensor_variable(floatX(nu)) - super().__init__(alpha=nu / 2., beta=0.5, *args, **kwargs) + super().__init__(alpha=nu / 2.0, beta=0.5, *args, **kwargs) class Weibull(PositiveContinuous): - R""" + r""" Weibull log-likelihood. 
The pdf of this distribution is @@ -2751,15 +2728,15 @@ def __init__(self, alpha, beta, *args, **kwargs): super().__init__(*args, **kwargs) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) - self.mean = beta * tt.exp(gammaln(1 + 1. / alpha)) - self.median = beta * tt.exp(gammaln(tt.log(2)))**(1. / alpha) - self.variance = beta**2 * tt.exp(gammaln(1 + 2. / alpha)) - self.mean**2 - self.mode = tt.switch(alpha >= 1, - beta * ((alpha - 1)/alpha) ** (1 / alpha), - 0) # Reference: https://en.wikipedia.org/wiki/Weibull_distribution + self.mean = beta * tt.exp(gammaln(1 + 1.0 / alpha)) + self.median = beta * tt.exp(gammaln(tt.log(2))) ** (1.0 / alpha) + self.variance = beta ** 2 * tt.exp(gammaln(1 + 2.0 / alpha)) - self.mean ** 2 + self.mode = tt.switch( + alpha >= 1, beta * ((alpha - 1) / alpha) ** (1 / alpha), 0 + ) # Reference: https://en.wikipedia.org/wiki/Weibull_distribution - assert_negative_support(alpha, 'alpha', 'Weibull') - assert_negative_support(beta, 'beta', 'Weibull') + assert_negative_support(alpha, "alpha", "Weibull") + assert_negative_support(beta, "beta", "Weibull") def random(self, point=None, size=None): """ @@ -2778,15 +2755,12 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) def _random(a, b, size=None): - return b * (-np.log(np.random.uniform(size=size)))**(1 / a) + return b * (-np.log(np.random.uniform(size=size))) ** (1 / a) - return generate_samples(_random, alpha, beta, - dist_shape=self.shape, - size=size) + return generate_samples(_random, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2804,10 +2778,15 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(tt.log(alpha) - tt.log(beta) - + (alpha - 1) * tt.log(value / beta) - - (value / beta)**alpha, - value >= 0, alpha > 0, beta > 0) + return bound( + tt.log(alpha) + - tt.log(beta) + + (alpha - 1) * tt.log(value / beta) + - (value / beta) ** alpha, + value >= 0, + alpha > 0, + beta > 0, + ) def logcdf(self, value): r""" @@ -2832,19 +2811,16 @@ def logcdf(self, value): """ alpha = self.alpha beta = self.beta - a = (value / beta)**alpha + a = (value / beta) ** alpha return tt.switch( tt.le(value, 0.0), -np.inf, - tt.switch( - tt.le(a, tt.log(2.0)), - tt.log(-tt.expm1(-a)), - tt.log1p(-tt.exp(-a))) + tt.switch(tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a))), ) class HalfStudentT(PositiveContinuous): - R""" + r""" Half Student's T log-likelihood The pdf of this distribution is @@ -2900,15 +2876,11 @@ class HalfStudentT(PositiveContinuous): x = pm.HalfStudentT('x', lam=4, nu=10) """ - def __init__(self, nu=1, sigma=None, lam=None, sd=None, - *args, **kwargs): + def __init__(self, nu=1, sigma=None, lam=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mode = tt.as_tensor_variable(0) lam, sigma = get_tau_sigma(lam, sigma) @@ -2917,9 +2889,9 @@ def __init__(self, nu=1, sigma=None, lam=None, sd=None, self.lam = tt.as_tensor_variable(lam) self.nu = nu = tt.as_tensor_variable(floatX(nu)) - assert_negative_support(sigma, 'sigma', 'HalfStudentT') - assert_negative_support(lam, 'lam', 'HalfStudentT') - 
assert_negative_support(nu, 'nu', 'HalfStudentT') + assert_negative_support(sigma, "sigma", "HalfStudentT") + assert_negative_support(lam, "lam", "HalfStudentT") + assert_negative_support(nu, "nu", "HalfStudentT") def random(self, point=None, size=None): """ @@ -2939,9 +2911,9 @@ def random(self, point=None, size=None): array """ nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size) - return np.abs(generate_samples(stats.t.rvs, nu, loc=0, scale=sigma, - dist_shape=self.shape, - size=size)) + return np.abs( + generate_samples(stats.t.rvs, nu, loc=0, scale=sigma, dist_shape=self.shape, size=size) + ) def logp(self, value): """ @@ -2961,18 +2933,24 @@ def logp(self, value): sigma = self.sigma lam = self.lam - return bound(tt.log(2) + gammaln((nu + 1.0) / 2.0) - - gammaln(nu / 2.0) - - .5 * tt.log(nu * np.pi * sigma**2) - - (nu + 1.0) / 2.0 * tt.log1p(value ** 2 / (nu * sigma**2)), - sigma > 0, lam > 0, nu > 0, value >= 0) + return bound( + tt.log(2) + + gammaln((nu + 1.0) / 2.0) + - gammaln(nu / 2.0) + - 0.5 * tt.log(nu * np.pi * sigma ** 2) + - (nu + 1.0) / 2.0 * tt.log1p(value ** 2 / (nu * sigma ** 2)), + sigma > 0, + lam > 0, + nu > 0, + value >= 0, + ) def _distr_parameters_for_repr(self): return ["nu", "lam"] class ExGaussian(Continuous): - R""" + r""" Exponentially modified Gaussian log-likelihood. Results from the convolution of a normal distribution with an exponential @@ -3036,25 +3014,21 @@ class ExGaussian(Continuous): Vol. 4, No. 1, pp 35-45. """ - def __init__(self, mu=0., sigma=None, nu=None, sd=None, - *args, **kwargs): + def __init__(self, mu=0.0, sigma=None, nu=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma)) self.nu = nu = tt.as_tensor_variable(floatX(nu)) self.mean = mu + nu - self.variance = (sigma**2) + (nu**2) + self.variance = (sigma ** 2) + (nu ** 2) - assert_negative_support(sigma, 'sigma', 'ExGaussian') - assert_negative_support(nu, 'nu', 'ExGaussian') + assert_negative_support(sigma, "sigma", "ExGaussian") + assert_negative_support(nu, "nu", "ExGaussian") def random(self, point=None, size=None): """ @@ -3073,16 +3047,14 @@ def random(self, point=None, size=None): ------- array """ - mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu], - point=point, size=size) + mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu], point=point, size=size) def _random(mu, sigma, nu, size=None): - return (np.random.normal(mu, sigma, size=size) - + np.random.exponential(scale=nu, size=size)) + return np.random.normal(mu, sigma, size=size) + np.random.exponential( + scale=nu, size=size + ) - return generate_samples(_random, mu, sigma, nu, - dist_shape=self.shape, - size=size) + return generate_samples(_random, mu, sigma, nu, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -3109,10 +3081,7 @@ def logp(self, value): # This condition is suggested by exGAUS.R from gamlss lp = tt.switch( tt.gt(nu, 0.05 * sigma), - -tt.log(nu) - + (mu - value) / nu - + 0.5 * (sigma / nu) ** 2 - + logpow(cdf_val_safe, 1.0), + -tt.log(nu) + (mu - value) / nu + 0.5 * (sigma / nu) ** 2 + logpow(cdf_val_safe, 1.0), -tt.log(sigma * tt.sqrt(2 * np.pi)) - 0.5 * standardized_val ** 2, ) @@ -3144,21 +3113,25 @@ def logcdf(self, value): """ mu = self.mu 
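+        # As in `logp`, the switch below falls back to the plain normal
+        # log-CDF when nu <= 0.05 * sigma; below that threshold the
+        # exponential term in the analytic ExGaussian expression can overflow.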
sigma = self.sigma - sigma_2 = sigma**2 + sigma_2 = sigma ** 2 nu = self.nu - z = value - mu - sigma_2/nu + z = value - mu - sigma_2 / nu return tt.switch( tt.gt(nu, 0.05 * sigma), - tt.log(std_cdf((value - mu)/sigma) - - std_cdf(z/sigma) * tt.exp( - ((mu + (sigma_2/nu))**2 - - (mu**2) - - 2 * value * ((sigma_2)/nu))/(2 * sigma_2))), - normal_lcdf(mu, sigma, value)) + tt.log( + std_cdf((value - mu) / sigma) + - std_cdf(z / sigma) + * tt.exp( + ((mu + (sigma_2 / nu)) ** 2 - (mu ** 2) - 2 * value * ((sigma_2) / nu)) + / (2 * sigma_2) + ) + ), + normal_lcdf(mu, sigma, value), + ) class VonMises(Continuous): - R""" + r""" Univariate VonMises log-likelihood. The pdf of this distribution is @@ -3201,15 +3174,14 @@ class VonMises(Continuous): Concentration (\frac{1}{kappa} is analogous to \sigma^2). """ - def __init__(self, mu=0.0, kappa=None, transform='circular', - *args, **kwargs): - if transform == 'circular': + def __init__(self, mu=0.0, kappa=None, transform="circular", *args, **kwargs): + if transform == "circular": transform = transforms.Circular() super().__init__(transform=transform, *args, **kwargs) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.kappa = kappa = tt.as_tensor_variable(floatX(kappa)) - assert_negative_support(kappa, 'kappa', 'VonMises') + assert_negative_support(kappa, "kappa", "VonMises") def random(self, point=None, size=None): """ @@ -3228,11 +3200,10 @@ def random(self, point=None, size=None): ------- array """ - mu, kappa = draw_values([self.mu, self.kappa], - point=point, size=size) - return generate_samples(stats.vonmises.rvs, loc=mu, kappa=kappa, - dist_shape=self.shape, - size=size) + mu, kappa = draw_values([self.mu, self.kappa], point=point, size=size) + return generate_samples( + stats.vonmises.rvs, loc=mu, kappa=kappa, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3250,15 +3221,19 @@ def logp(self, value): """ mu = self.mu kappa = self.kappa - return bound(kappa * tt.cos(mu - value) - (tt.log(2 * np.pi) + log_i0(kappa)), - kappa > 0, value >= -np.pi, value <= np.pi) + return bound( + kappa * tt.cos(mu - value) - (tt.log(2 * np.pi) + log_i0(kappa)), + kappa > 0, + value >= -np.pi, + value <= np.pi, + ) def _distr_parameters_for_repr(self): return ["mu", "kappa"] class SkewNormal(Continuous): - R""" + r""" Univariate skew-normal log-likelihood. 
The pdf of this distribution is @@ -3315,16 +3290,12 @@ class SkewNormal(Continuous): """ - def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, - *args, **kwargs): + def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.mu = mu = tt.as_tensor_variable(floatX(mu)) @@ -3333,11 +3304,11 @@ def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) - self.mean = mu + self.sigma * (2 / np.pi)**0.5 * alpha / (1 + alpha**2)**0.5 - self.variance = self.sigma**2 * (1 - (2 * alpha**2) / ((1 + alpha**2) * np.pi)) + self.mean = mu + self.sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5 + self.variance = self.sigma ** 2 * (1 - (2 * alpha ** 2) / ((1 + alpha ** 2) * np.pi)) - assert_negative_support(tau, 'tau', 'SkewNormal') - assert_negative_support(sigma, 'sigma', 'SkewNormal') + assert_negative_support(tau, "tau", "SkewNormal") + assert_negative_support(sigma, "sigma", "SkewNormal") def random(self, point=None, size=None): """ @@ -3357,11 +3328,11 @@ def random(self, point=None, size=None): array """ mu, tau, _, alpha = draw_values( - [self.mu, self.tau, self.sigma, self.alpha], point=point, size=size) - return generate_samples(stats.skewnorm.rvs, - a=alpha, loc=mu, scale=tau**-0.5, - dist_shape=self.shape, - size=size) + [self.mu, self.tau, self.sigma, self.alpha], point=point, size=size + ) + return generate_samples( + stats.skewnorm.rvs, a=alpha, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3382,18 +3353,18 @@ def logp(self, value): mu = self.mu alpha = self.alpha return bound( - tt.log(1 + - tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2))) - + (-tau * (value - mu)**2 - + tt.log(tau / np.pi / 2.)) / 2., - tau > 0, sigma > 0) + tt.log(1 + tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2))) + + (-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, + tau > 0, + sigma > 0, + ) def _distr_parameters_for_repr(self): return ["mu", "sigma", "alpha"] class Triangular(BoundedContinuous): - R""" + r""" Continuous Triangular log-likelihood The pdf of this distribution is @@ -3446,8 +3417,7 @@ class Triangular(BoundedContinuous): Upper limit. """ - def __init__(self, lower=0, upper=1, c=0.5, - *args, **kwargs): + def __init__(self, lower=0, upper=1, c=0.5, *args, **kwargs): self.median = self.mean = self.c = c = tt.as_tensor_variable(floatX(c)) self.lower = lower = tt.as_tensor_variable(floatX(lower)) self.upper = upper = tt.as_tensor_variable(floatX(upper)) @@ -3471,10 +3441,10 @@ def random(self, point=None, size=None): ------- array """ - c, lower, upper = draw_values([self.c, self.lower, self.upper], - point=point, size=size) - return generate_samples(self._random, c=c, lower=lower, upper=upper, - size=size, dist_shape=self.shape) + c, lower, upper = draw_values([self.c, self.lower, self.upper], point=point, size=size) + return generate_samples( + self._random, c=c, lower=lower, upper=upper, size=size, dist_shape=self.shape + ) def _random(self, c, lower, upper, size): """ Wrapper around stats.triang.rvs that converts Triangular's @@ -3483,12 +3453,7 @@ def _random(self, c, lower, upper, size): the scipy.rvs representation. 
""" scale = upper - lower - return stats.triang.rvs( - c=(c - lower) / scale, - loc=lower, - scale=scale, - size=size, - ) + return stats.triang.rvs(c=(c - lower) / scale, loc=lower, scale=scale, size=size) def logp(self, value): """ @@ -3507,13 +3472,19 @@ def logp(self, value): c = self.c lower = self.lower upper = self.upper - return tt.switch(alltrue_elemwise([lower <= value, value < c]), - tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))), - tt.switch(tt.eq(value, c), - tt.log(2 / (upper - lower)), - tt.switch(alltrue_elemwise([c < value, value <= upper]), - tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))), - np.inf))) + return tt.switch( + alltrue_elemwise([lower <= value, value < c]), + tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))), + tt.switch( + tt.eq(value, c), + tt.log(2 / (upper - lower)), + tt.switch( + alltrue_elemwise([c < value, value <= upper]), + tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))), + np.inf, + ), + ), + ) def logcdf(self, value): """ @@ -3539,17 +3510,13 @@ def logcdf(self, value): tt.switch( tt.le(value, c), tt.log(((value - l) ** 2) / ((u - l) * (c - l))), - tt.switch( - tt.lt(value, u), - tt.log1p(-((u - value) ** 2) / ((u - l) * (u - c))), - 0 - ) - ) + tt.switch(tt.lt(value, u), tt.log1p(-((u - value) ** 2) / ((u - l) * (u - c))), 0), + ), ) class Gumbel(Continuous): - R""" + r""" Univariate Gumbel log-likelihood The pdf of this distribution is @@ -3600,7 +3567,7 @@ def __init__(self, mu=0, beta=1.0, **kwargs): self.mu = tt.as_tensor_variable(floatX(mu)) self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'Gumbel') + assert_negative_support(beta, "beta", "Gumbel") self.mean = self.mu + self.beta * np.euler_gamma self.median = self.mu - self.beta * tt.log(tt.log(2)) @@ -3627,9 +3594,9 @@ def random(self, point=None, size=None): array """ mu, sigma = draw_values([self.mu, self.beta], point=point, size=size) - return generate_samples(stats.gumbel_r.rvs, loc=mu, scale=sigma, - dist_shape=self.shape, - size=size) + return generate_samples( + stats.gumbel_r.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3666,11 +3633,11 @@ def logcdf(self, value): beta = self.beta mu = self.mu - return -tt.exp(-(value - mu)/beta) + return -tt.exp(-(value - mu) / beta) class Rice(PositiveContinuous): - R""" + r""" Rice distribution. .. 
@@ -3732,31 +3699,47 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):
         super().__init__(*args, **kwargs)
         if sd is not None:
             sigma = sd
-            warnings.warn(
-                "sd is deprecated, use sigma instead",
-                DeprecationWarning
-            )
+            warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)

         nu, b, sigma = self.get_nu_b(nu, b, sigma)
         self.nu = nu = tt.as_tensor_variable(floatX(nu))
         self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
         self.b = b = tt.as_tensor_variable(floatX(b))
-        self.mean = sigma * np.sqrt(np.pi / 2) * tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / (2 * sigma**2)))
-                                                                                          * tt.i0(-(-nu**2 / (2 * sigma**2)) / 2) - (-nu**2 / (2 * sigma**2)) * tt.i1(-(-nu**2 / (2 * sigma**2)) / 2))
-        self.variance = 2 * sigma**2 + nu**2 - (np.pi * sigma**2 / 2) * (tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / (
-            2 * sigma**2))) * tt.i0(-(-nu**2 / (2 * sigma**2)) / 2) - (-nu**2 / (2 * sigma**2)) * tt.i1(-(-nu**2 / (2 * sigma**2)) / 2)))**2
+
+        nu_sigma_ratio = -nu ** 2 / (2 * sigma ** 2)
+        self.mean = (
+            sigma
+            * np.sqrt(np.pi / 2)
+            * tt.exp(nu_sigma_ratio / 2)
+            * (
+                (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2)
+                - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2)
+            )
+        )
+        self.variance = (
+            2 * sigma ** 2
+            + nu ** 2
+            - (np.pi * sigma ** 2 / 2)
+            * (
+                tt.exp(nu_sigma_ratio / 2)
+                * (
+                    (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2)
+                    - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2)
+                )
+            )
+            ** 2
+        )

     def get_nu_b(self, nu, b, sigma):
         if sigma is None:
-            sigma = 1.
+            sigma = 1.0
         if nu is None and b is not None:
             nu = b * sigma
             return nu, b, sigma
         elif nu is not None and b is None:
             b = nu / sigma
             return nu, b, sigma
-        raise ValueError('Rice distribution must specify either nu'
-                         ' or b.')
+        raise ValueError("Rice distribution must specify either nu or b.")

     def random(self, point=None, size=None):
         """
@@ -3775,10 +3758,8 @@ def random(self, point=None, size=None):
         -------
         array
         """
-        nu, sigma = draw_values([self.nu, self.sigma],
-                                point=point, size=size)
-        return generate_samples(self._random, nu=nu, sigma=sigma,
-                                dist_shape=self.shape, size=size)
+        nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size)
+        return generate_samples(self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size)

     def _random(self, nu, sigma, size):
         """ Wrapper around stats.rice.rvs that converts Rice's
@@ -3786,11 +3767,7 @@
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
         """
-        return stats.rice.rvs(
-            b=nu / sigma,
-            scale=sigma,
-            size=size,
-        )
+        return stats.rice.rvs(b=nu / sigma, scale=sigma, size=size)

     def logp(self, value):
         """
@@ -3810,18 +3787,19 @@ def logp(self, value):
         sigma = self.sigma
         b = self.b
         x = value / sigma
-        return bound(tt.log(x * tt.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma),
-                     sigma >= 0,
-                     nu >= 0,
-                     value > 0,
-                     )
+        return bound(
+            tt.log(x * tt.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma),
+            sigma >= 0,
+            nu >= 0,
+            value > 0,
+        )

     def _distr_parameters_for_repr(self):
         return ["nu", "sigma"]


 class Logistic(Continuous):
-    R"""
+    r"""
     Logistic log-likelihood.

     The pdf of this distribution is
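get_nu_b above ties the two Rice parametrizations together through b = nu / sigma, so passing either nu or b specifies the same distribution; a small equivalence sketch (values are illustrative):

    import pymc3 as pm

    d_nu = pm.Rice.dist(nu=2.0, sigma=0.5)
    d_b = pm.Rice.dist(b=4.0, sigma=0.5)  # b = nu / sigma = 2.0 / 0.5
    print(d_nu.logp(1.0).eval(), d_b.logp(1.0).eval())  # identical logp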
""" - def __init__(self, mu=0., s=1., *args, **kwargs): + def __init__(self, mu=0.0, s=1.0, *args, **kwargs): super().__init__(*args, **kwargs) self.mu = tt.as_tensor_variable(floatX(mu)) self.s = tt.as_tensor_variable(floatX(s)) self.mean = self.mode = mu - self.variance = s**2 * np.pi**2 / 3. + self.variance = s ** 2 * np.pi ** 2 / 3.0 def logp(self, value): """ @@ -3889,8 +3867,7 @@ def logp(self, value): mu = self.mu s = self.s - return bound( - -(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), s > 0) + return bound(-(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), s > 0) def random(self, point=None, size=None): """ @@ -3912,10 +3889,8 @@ def random(self, point=None, size=None): mu, s = draw_values([self.mu, self.s], point=point, size=size) return generate_samples( - stats.logistic.rvs, - loc=mu, scale=s, - dist_shape=self.shape, - size=size) + stats.logistic.rvs, loc=mu, scale=s, dist_shape=self.shape, size=size + ) def logcdf(self, value): r""" @@ -3940,21 +3915,18 @@ def logcdf(self, value): """ mu = self.mu s = self.s - a = -(value - mu)/s - return - tt.switch( + a = -(value - mu) / s + return -tt.switch( tt.le(a, -37), tt.exp(a), tt.switch( - tt.le(a, 18), - tt.log1p(tt.exp(a)), - tt.switch( - tt.le(a, 33.3), - tt.exp(-a) + a, - a))) + tt.le(a, 18), tt.log1p(tt.exp(a)), tt.switch(tt.le(a, 33.3), tt.exp(-a) + a, a) + ), + ) class LogitNormal(UnitContinuous): - R""" + r""" Logit-Normal log-likelihood. The pdf of this distribution is @@ -4000,18 +3972,15 @@ class LogitNormal(UnitContinuous): def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mu = mu = tt.as_tensor_variable(floatX(mu)) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tau = tt.as_tensor_variable(tau) self.median = invlogit(mu) - assert_negative_support(sigma, 'sigma', 'LogitNormal') - assert_negative_support(tau, 'tau', 'LogitNormal') + assert_negative_support(sigma, "sigma", "LogitNormal") + assert_negative_support(tau, "tau", "LogitNormal") super().__init__(**kwargs) @@ -4032,10 +4001,10 @@ def random(self, point=None, size=None): ------- array """ - mu, _, sigma = draw_values( - [self.mu, self.tau, self.sigma], point=point, size=size) - return expit(generate_samples(stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, - size=size)) + mu, _, sigma = draw_values([self.mu, self.tau, self.sigma], point=point, size=size) + return expit( + generate_samples(stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size) + ) def logp(self, value): """ @@ -4054,16 +4023,21 @@ def logp(self, value): sigma = self.sigma mu = self.mu tau = self.tau - return bound(-0.5 * tau * (logit(value) - mu) ** 2 - + 0.5 * tt.log(tau / (2. * np.pi)) - - tt.log(value * (1 - value)), value > 0, value < 1, tau > 0) + return bound( + -0.5 * tau * (logit(value) - mu) ** 2 + + 0.5 * tt.log(tau / (2.0 * np.pi)) + - tt.log(value * (1 - value)), + value > 0, + value < 1, + tau > 0, + ) def _distr_parameters_for_repr(self): return ["mu", "sigma"] class Interpolated(BoundedContinuous): - R""" + r""" Univariate probability distribution defined as a linear interpolation of probability density function evaluated on some lattice of points. 
@@ -4096,8 +4070,7 @@ def __init__(self, x_points, pdf_points, *args, **kwargs):

         super().__init__(lower=lower, upper=upper, *args, **kwargs)

-        interp = InterpolatedUnivariateSpline(
-            x_points, pdf_points, k=1, ext='zeros')
+        interp = InterpolatedUnivariateSpline(x_points, pdf_points, k=1, ext="zeros")
         Z = interp.integral(x_points[0], x_points[-1])

         self.Z = tt.as_tensor_variable(Z)
@@ -4119,11 +4092,9 @@ def _argcdf(self, p):
         return x[index] + np.where(
             np.abs(slope) <= 1e-8,
             np.where(
-                np.abs(pdf[index]) <= 1e-8,
-                np.zeros(index.shape),
-                (p - cdf[index]) / pdf[index]
+                np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index]
             ),
-            (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope
+            (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope,
        )

     def _random(self, size=None):
@@ -4143,9 +4114,7 @@ def random(self, size=None):
         -------
         array
         """
-        return generate_samples(self._random,
-                                dist_shape=self.shape,
-                                size=size)
+        return generate_samples(self._random, dist_shape=self.shape, size=size)

     def logp(self, value):
         """
@@ -4168,7 +4137,7 @@ def _distr_parameters_for_repr(self):


 class Moyal(Continuous):
-    R"""
+    r"""
     Moyal log-likelihood.

     The pdf of this distribution is
@@ -4214,16 +4183,16 @@ class Moyal(Continuous):
         Scale parameter (sigma > 0).
     """

-    def __init__(self, mu=0, sigma=1., *args, **kwargs):
+    def __init__(self, mu=0, sigma=1.0, *args, **kwargs):
         self.mu = tt.as_tensor_variable(floatX(mu))
         self.sigma = tt.as_tensor_variable(floatX(sigma))

-        assert_negative_support(sigma, 'sigma', 'Moyal')
+        assert_negative_support(sigma, "sigma", "Moyal")

         self.mean = self.mu + self.sigma * (np.euler_gamma + tt.log(2))
-        self.median = self.mu - self.sigma * tt.log(2 * tt.erfcinv(1 / 2)**2)
+        self.median = self.mu - self.sigma * tt.log(2 * tt.erfcinv(1 / 2) ** 2)
         self.mode = self.mu
-        self.variance = (np.pi**2 / 2.0) * self.sigma**2
+        self.variance = (np.pi ** 2 / 2.0) * self.sigma ** 2

         super().__init__(*args, **kwargs)

@@ -4245,9 +4214,9 @@ def random(self, point=None, size=None):
         array
         """
         mu, sigma = draw_values([self.mu, self.sigma], point=point, size=size)
-        return generate_samples(stats.moyal.rvs, loc=mu, scale=sigma,
-                                dist_shape=self.shape,
-                                size=size)
+        return generate_samples(
+            stats.moyal.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size
+        )

     def logp(self, value):
         """
@@ -4264,9 +4233,14 @@ def logp(self, value):
         TensorVariable
         """
         scaled = (value - self.mu) / self.sigma
-        return bound((-(1 / 2) * (scaled + tt.exp(-scaled))
-                      - tt.log(self.sigma)
-                      - (1 / 2) * tt.log(2 * np.pi)), self.sigma > 0)
+        return bound(
+            (
+                -(1 / 2) * (scaled + tt.exp(-scaled))
+                - tt.log(self.sigma)
+                - (1 / 2) * tt.log(2 * np.pi)
+            ),
+            self.sigma > 0,
+        )

     def logcdf(self, value):
         """
@@ -4284,4 +4258,4 @@ def logcdf(self, value):
         TensorVariable
         """
         scaled = (value - self.mu) / self.sigma
-        return tt.log(tt.erfc(tt.exp(-scaled / 2) * (2**-0.5)))
+        return tt.log(tt.erfc(tt.exp(-scaled / 2) * (2 ** -0.5)))
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..aa4949aa1c
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,2 @@
+[tool.black]
+line-length = 100
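The two-line pyproject.toml is what drives every re-wrap in this diff: black discovers [tool.black] at the project root, so a bare invocation such as

    black pymc3/

now formats to 100 columns rather than black's built-in default of 88, equivalent to passing --line-length 100 explicitly (the path here is illustrative).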