From dcaaafe3bc69ac65d7ee73e53ec69ba31c9a9e4d Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 14 Aug 2020 17:53:46 +0200 Subject: [PATCH 1/6] Reorder import statements according to pep8 --- pymc3/distributions/continuous.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index b9f0a74b8c..0ba5bb5a25 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -17,25 +17,27 @@ A collection of common probability distributions for stochastic nodes in PyMC. """ +import warnings + import numpy as np import theano import theano.tensor as tt -from scipy import stats -from scipy.special import expit -from scipy.interpolate import InterpolatedUnivariateSpline -import warnings -from pymc3.theanof import floatX from . import transforms -from pymc3.util import get_variable_name -from .special import log_i0 -from ..math import invlogit, logit, logdiffexp from .dist_math import ( alltrue_elemwise, betaln, bound, gammaln, i0e, incomplete_beta, logpow, normal_lccdf, normal_lcdf, SplineWrapper, std_cdf, zvalue, clipped_beta_rvs, ) from .distribution import (Continuous, draw_values, generate_samples) +from .special import log_i0 +from ..math import invlogit, logit, logdiffexp + +from pymc3.theanof import floatX +from pymc3.util import get_variable_name +from scipy import stats +from scipy.special import expit +from scipy.interpolate import InterpolatedUnivariateSpline __all__ = ['Uniform', 'Flat', 'HalfFlat', 'Normal', 'TruncatedNormal', 'Beta', 'Kumaraswamy', 'Exponential', 'Laplace', 'StudentT', 'Cauchy', From c484eeb841d65a9090c35bc70371f15dc6b48a00 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 14 Aug 2020 17:54:55 +0200 Subject: [PATCH 2/6] Blackified continuous.py file --- pymc3/distributions/continuous.py | 1494 ++++++++++++++++------------- 1 file changed, 809 insertions(+), 685 deletions(-) diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index 0ba5bb5a25..d76935aa4b 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -25,11 +25,21 @@ from . 
import transforms from .dist_math import ( - alltrue_elemwise, betaln, bound, gammaln, i0e, incomplete_beta, logpow, - normal_lccdf, normal_lcdf, SplineWrapper, std_cdf, zvalue, + alltrue_elemwise, + betaln, + bound, + gammaln, + i0e, + incomplete_beta, + logpow, + normal_lccdf, + normal_lcdf, + SplineWrapper, + std_cdf, + zvalue, clipped_beta_rvs, ) -from .distribution import (Continuous, draw_values, generate_samples) +from .distribution import Continuous, draw_values, generate_samples from .special import log_i0 from ..math import invlogit, logit, logdiffexp @@ -39,12 +49,39 @@ from scipy.special import expit from scipy.interpolate import InterpolatedUnivariateSpline -__all__ = ['Uniform', 'Flat', 'HalfFlat', 'Normal', 'TruncatedNormal', 'Beta', - 'Kumaraswamy', 'Exponential', 'Laplace', 'StudentT', 'Cauchy', - 'HalfCauchy', 'Gamma', 'Weibull', 'HalfStudentT', 'Lognormal', - 'ChiSquared', 'HalfNormal', 'Wald', 'Pareto', 'InverseGamma', - 'ExGaussian', 'VonMises', 'SkewNormal', 'Triangular', 'Gumbel', - 'Logistic', 'LogitNormal', 'Interpolated', 'Rice', 'Moyal'] +__all__ = [ + "Uniform", + "Flat", + "HalfFlat", + "Normal", + "TruncatedNormal", + "Beta", + "Kumaraswamy", + "Exponential", + "Laplace", + "StudentT", + "Cauchy", + "HalfCauchy", + "Gamma", + "Weibull", + "HalfStudentT", + "Lognormal", + "ChiSquared", + "HalfNormal", + "Wald", + "Pareto", + "InverseGamma", + "ExGaussian", + "VonMises", + "SkewNormal", + "Triangular", + "Gumbel", + "Logistic", + "LogitNormal", + "Interpolated", + "Rice", + "Moyal", +] class PositiveContinuous(Continuous): @@ -64,13 +101,12 @@ def __init__(self, transform=transforms.logodds, *args, **kwargs): class BoundedContinuous(Continuous): """Base class for bounded continuous distributions""" - def __init__(self, transform='auto', lower=None, upper=None, - *args, **kwargs): + def __init__(self, transform="auto", lower=None, upper=None, *args, **kwargs): lower = tt.as_tensor_variable(lower) if lower is not None else None upper = tt.as_tensor_variable(upper) if upper is not None else None - if transform == 'auto': + if transform == "auto": if lower is None and upper is None: transform = None elif lower is not None and upper is None: @@ -89,8 +125,9 @@ def assert_negative_support(var, label, distname, value=-1e-6): return try: # Transformed distribution - support = np.isfinite(var.transformed.distribution.dist - .logp(value).tag.test_value) + support = np.isfinite( + var.transformed.distribution.dist.logp(value).tag.test_value + ) except AttributeError: try: # Untransformed distribution @@ -101,7 +138,8 @@ def assert_negative_support(var, label, distname, value=-1e-6): if np.any(support): msg = "The variable specified for {0} has negative support for {1}, ".format( - label, distname) + label, distname + ) msg += "likely making it unsuitable for this parameter." warnings.warn(msg) @@ -129,27 +167,27 @@ def get_tau_sigma(tau=None, sigma=None): """ if tau is None: if sigma is None: - sigma = 1. - tau = 1. + sigma = 1.0 + tau = 1.0 else: - tau = sigma**-2. + tau = sigma ** -2.0 else: if sigma is not None: raise ValueError("Can't pass both tau and sigma") else: - sigma = tau**-.5 + sigma = tau ** -0.5 # cast tau and sigma to float in a way that works for both np.arrays # and pure python - tau = 1. * tau - sigma = 1. * sigma + tau = 1.0 * tau + sigma = 1.0 * sigma return floatX(tau), floatX(sigma) class Uniform(BoundedContinuous): - R""" + r""" Continuous uniform log-likelihood. 
The pdf of this distribution is @@ -193,7 +231,7 @@ class Uniform(BoundedContinuous): def __init__(self, lower=0, upper=1, *args, **kwargs): self.lower = lower = tt.as_tensor_variable(floatX(lower)) self.upper = upper = tt.as_tensor_variable(floatX(upper)) - self.mean = (upper + lower) / 2. + self.mean = (upper + lower) / 2.0 self.median = self.mean super().__init__(lower=lower, upper=upper, *args, **kwargs) @@ -216,12 +254,14 @@ def random(self, point=None, size=None): array """ - lower, upper = draw_values([self.lower, self.upper], - point=point, size=size) - return generate_samples(stats.uniform.rvs, loc=lower, - scale=upper - lower, - dist_shape=self.shape, - size=size) + lower, upper = draw_values([self.lower, self.upper], point=point, size=size) + return generate_samples( + stats.uniform.rvs, + loc=lower, + scale=upper - lower, + dist_shape=self.shape, + size=size, + ) def logp(self, value): """ @@ -238,17 +278,17 @@ def logp(self, value): """ lower = self.lower upper = self.upper - return bound(-tt.log(upper - lower), - value >= lower, value <= upper) + return bound(-tt.log(upper - lower), value >= lower, value <= upper) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self lower = dist.lower upper = dist.upper - name = r'\text{%s}' % name - return r'${} \sim \text{{Uniform}}(\mathit{{lower}}={},~\mathit{{upper}}={})$'.format( - name, get_variable_name(lower), get_variable_name(upper)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Uniform}}(\mathit{{lower}}={},~\mathit{{upper}}={})$".format( + name, get_variable_name(lower), get_variable_name(upper) + ) def logcdf(self, value): """ @@ -271,9 +311,8 @@ def logcdf(self, value): tt.switch( tt.eq(value, self.upper), 0, - tt.log((value - self.lower)) - - tt.log((self.upper - self.lower)) - ) + tt.log((value - self.lower)) - tt.log((self.upper - self.lower)), + ), ) @@ -285,7 +324,7 @@ class Flat(Continuous): def __init__(self, *args, **kwargs): self._default = 0 - super().__init__(defaults=('_default',), *args, **kwargs) + super().__init__(defaults=("_default",), *args, **kwargs) def random(self, point=None, size=None): """Raises ValueError as it is not possible to sample from Flat distribution @@ -299,7 +338,7 @@ def random(self, point=None, size=None): ------- ValueError """ - raise ValueError('Cannot sample from Flat distribution') + raise ValueError("Cannot sample from Flat distribution") def logp(self, value): """ @@ -318,8 +357,8 @@ def logp(self, value): return tt.zeros_like(value) def _repr_latex_(self, name=None, dist=None): - name = r'\text{%s}' % name - return r'${} \sim \text{{Flat}}()$'.format(name) + name = r"\text{%s}" % name + return r"${} \sim \text{{Flat}}()$".format(name) def logcdf(self, value): """ @@ -339,11 +378,7 @@ def logcdf(self, value): return tt.switch( tt.eq(value, -np.inf), -np.inf, - tt.switch( - tt.eq(value, np.inf), - 0, - tt.log(0.5) - ) + tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)), ) @@ -352,7 +387,7 @@ class HalfFlat(PositiveContinuous): def __init__(self, *args, **kwargs): self._default = 1 - super().__init__(defaults=('_default',), *args, **kwargs) + super().__init__(defaults=("_default",), *args, **kwargs) def random(self, point=None, size=None): """Raises ValueError as it is not possible to sample from HalfFlat distribution @@ -366,7 +401,7 @@ def random(self, point=None, size=None): ------- ValueError """ - raise ValueError('Cannot sample from HalfFlat distribution') + raise ValueError("Cannot sample from HalfFlat distribution") def logp(self, value): """ @@ 
-385,8 +420,8 @@ def logp(self, value): return bound(tt.zeros_like(value), value > 0) def _repr_latex_(self, name=None, dist=None): - name = r'\text{%s}' % name - return r'${} \sim \text{{HalfFlat}}()$'.format(name) + name = r"\text{%s}" % name + return r"${} \sim \text{{HalfFlat}}()$".format(name) def logcdf(self, value): """ @@ -404,18 +439,12 @@ def logcdf(self, value): TensorVariable """ return tt.switch( - tt.lt(value, np.inf), - -np.inf, - tt.switch( - tt.eq(value, np.inf), - 0, - -np.inf - ) + tt.lt(value, np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, -np.inf) ) class Normal(Continuous): - R""" + r""" Univariate normal log-likelihood. The pdf of this distribution is @@ -480,19 +509,18 @@ class Normal(Continuous): def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) - self.variance = 1. / self.tau + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( + floatX(mu) + ) + self.variance = 1.0 / self.tau - assert_negative_support(sigma, 'sigma', 'Normal') - assert_negative_support(tau, 'tau', 'Normal') + assert_negative_support(sigma, "sigma", "Normal") + assert_negative_support(tau, "tau", "Normal") super().__init__(**kwargs) @@ -513,11 +541,12 @@ def random(self, point=None, size=None): ------- array """ - mu, tau, _ = draw_values([self.mu, self.tau, self.sigma], - point=point, size=size) - return generate_samples(stats.norm.rvs, loc=mu, scale=tau**-0.5, - dist_shape=self.shape, - size=size) + mu, tau, _ = draw_values( + [self.mu, self.tau, self.sigma], point=point, size=size + ) + return generate_samples( + stats.norm.rvs, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -537,18 +566,19 @@ def logp(self, value): tau = self.tau mu = self.mu - return bound((-tau * (value - mu)**2 + tt.log(tau / np.pi / 2.)) / 2., - sigma > 0) + return bound( + (-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, sigma > 0 + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self sigma = dist.sigma mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{Normal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(sigma)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Normal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$".format( + name, get_variable_name(mu), get_variable_name(sigma) + ) def logcdf(self, value): """ @@ -569,7 +599,7 @@ def logcdf(self, value): class TruncatedNormal(BoundedContinuous): - R""" + r""" Univariate truncated normal log-likelihood. 
The pdf of this distribution is @@ -640,37 +670,62 @@ class TruncatedNormal(BoundedContinuous): """ - def __init__(self, mu=0, sigma=None, tau=None, lower=None, upper=None, - transform='auto', sd=None, *args, **kwargs): + def __init__( + self, + mu=0, + sigma=None, + tau=None, + lower=None, + upper=None, + transform="auto", + sd=None, + *args, + **kwargs + ): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) - self.lower_check = tt.as_tensor_variable(floatX(lower)) if lower is not None else lower - self.upper_check = tt.as_tensor_variable(floatX(upper)) if upper is not None else upper - self.lower = tt.as_tensor_variable(floatX(lower)) if lower is not None else tt.as_tensor_variable(-np.inf) - self.upper = tt.as_tensor_variable(floatX(upper)) if upper is not None else tt.as_tensor_variable(np.inf) + self.lower_check = ( + tt.as_tensor_variable(floatX(lower)) if lower is not None else lower + ) + self.upper_check = ( + tt.as_tensor_variable(floatX(upper)) if upper is not None else upper + ) + self.lower = ( + tt.as_tensor_variable(floatX(lower)) + if lower is not None + else tt.as_tensor_variable(-np.inf) + ) + self.upper = ( + tt.as_tensor_variable(floatX(upper)) + if upper is not None + else tt.as_tensor_variable(np.inf) + ) self.mu = tt.as_tensor_variable(floatX(mu)) if self.lower_check is None and self.upper_check is None: self._defaultval = mu elif self.lower_check is None and self.upper_check is not None: - self._defaultval = self.upper - 1. + self._defaultval = self.upper - 1.0 elif self.lower_check is not None and self.upper_check is None: - self._defaultval = self.lower + 1. + self._defaultval = self.lower + 1.0 else: self._defaultval = (self.lower + self.upper) / 2 - assert_negative_support(sigma, 'sigma', 'TruncatedNormal') - assert_negative_support(tau, 'tau', 'TruncatedNormal') + assert_negative_support(sigma, "sigma", "TruncatedNormal") + assert_negative_support(tau, "tau", "TruncatedNormal") - super().__init__(defaults=('_defaultval',), transform=transform, - lower=lower, upper=upper, *args, **kwargs) + super().__init__( + defaults=("_defaultval",), + transform=transform, + lower=lower, + upper=upper, + *args, + **kwargs + ) def random(self, point=None, size=None): """ @@ -690,9 +745,7 @@ def random(self, point=None, size=None): array """ mu, sigma, lower, upper = draw_values( - [self.mu, self.sigma, self.lower, self.upper], - point=point, - size=size + [self.mu, self.sigma, self.lower, self.upper], point=point, size=size ) return generate_samples( self._random, @@ -749,7 +802,7 @@ def _normalization(self): mu, sigma = self.mu, self.sigma if self.lower_check is None and self.upper_check is None: - return 0. 
+ return 0.0 if self.lower_check is not None and self.upper_check is not None: lcdf_a = normal_lcdf(mu, sigma, self.lower) @@ -758,9 +811,7 @@ def _normalization(self): lsf_b = normal_lccdf(mu, sigma, self.upper) return tt.switch( - self.lower > 0, - logdiffexp(lsf_a, lsf_b), - logdiffexp(lcdf_b, lcdf_a), + self.lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a) ) if self.lower_check is not None: @@ -771,11 +822,10 @@ def _normalization(self): def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self - name = r'\text{%s}' % name + name = r"\text{%s}" % name return ( - r'${} \sim \text{{TruncatedNormal}}(' - r'\mathit{{mu}}={},~\mathit{{sigma}}={},a={},b={})$' - .format( + r"${} \sim \text{{TruncatedNormal}}(" + r"\mathit{{mu}}={},~\mathit{{sigma}}={},a={},b={})$".format( name, get_variable_name(self.mu), get_variable_name(self.sigma), @@ -786,7 +836,7 @@ def _repr_latex_(self, name=None, dist=None): class HalfNormal(PositiveContinuous): - R""" + r""" Half-normal log-likelihood. The pdf of this distribution is @@ -851,10 +901,7 @@ class HalfNormal(PositiveContinuous): def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) super().__init__(*args, **kwargs) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) @@ -862,10 +909,10 @@ def __init__(self, sigma=None, tau=None, sd=None, *args, **kwargs): self.tau = tau = tt.as_tensor_variable(tau) self.mean = tt.sqrt(2 / (np.pi * self.tau)) - self.variance = (1. - 2 / np.pi) / self.tau + self.variance = (1.0 - 2 / np.pi) / self.tau - assert_negative_support(tau, 'tau', 'HalfNormal') - assert_negative_support(sigma, 'sigma', 'HalfNormal') + assert_negative_support(tau, "tau", "HalfNormal") + assert_negative_support(sigma, "sigma", "HalfNormal") def random(self, point=None, size=None): """ @@ -885,9 +932,9 @@ def random(self, point=None, size=None): array """ sigma = draw_values([self.sigma], point=point, size=size)[0] - return generate_samples(stats.halfnorm.rvs, loc=0., scale=sigma, - dist_shape=self.shape, - size=size) + return generate_samples( + stats.halfnorm.rvs, loc=0.0, scale=sigma, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -905,17 +952,21 @@ def logp(self, value): """ tau = self.tau sigma = self.sigma - return bound(-0.5 * tau * value**2 + 0.5 * tt.log(tau * 2. / np.pi), - value >= 0, - tau > 0, sigma > 0) + return bound( + -0.5 * tau * value ** 2 + 0.5 * tt.log(tau * 2.0 / np.pi), + value >= 0, + tau > 0, + sigma > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self sigma = dist.sigma - name = r'\text{%s}' % name - return r'${} \sim \text{{HalfNormal}}(\mathit{{sigma}}={})$'.format(name, - get_variable_name(sigma)) + name = r"\text{%s}" % name + return r"${} \sim \text{{HalfNormal}}(\mathit{{sigma}}={})$".format( + name, get_variable_name(sigma) + ) def logcdf(self, value): """ @@ -936,13 +987,13 @@ def logcdf(self, value): z = zvalue(value, mu=0, sigma=sigma) return tt.switch( tt.lt(z, -1.0), - tt.log(tt.erfcx(-z / tt.sqrt(2.))) - tt.sqr(z), - tt.log1p(-tt.erfc(z / tt.sqrt(2.))) + tt.log(tt.erfcx(-z / tt.sqrt(2.0))) - tt.sqr(z), + tt.log1p(-tt.erfc(z / tt.sqrt(2.0))), ) class Wald(PositiveContinuous): - R""" + r""" Wald log-likelihood. 
The pdf of this distribution is @@ -1019,7 +1070,7 @@ class Wald(PositiveContinuous): statmod: Probability Calculations for the Inverse Gaussian Distribution """ - def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs): + def __init__(self, mu=None, lam=None, phi=None, alpha=0.0, *args, **kwargs): super().__init__(*args, **kwargs) mu, lam, phi = self.get_mu_lam_phi(mu, lam, phi) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) @@ -1028,13 +1079,19 @@ def __init__(self, mu=None, lam=None, phi=None, alpha=0., *args, **kwargs): self.phi = phi = tt.as_tensor_variable(floatX(phi)) self.mean = self.mu + self.alpha - self.mode = self.mu * (tt.sqrt(1. + (1.5 * self.mu / self.lam)**2) - - 1.5 * self.mu / self.lam) + self.alpha - self.variance = (self.mu**3) / self.lam + self.mode = ( + self.mu + * ( + tt.sqrt(1.0 + (1.5 * self.mu / self.lam) ** 2) + - 1.5 * self.mu / self.lam + ) + + self.alpha + ) + self.variance = (self.mu ** 3) / self.lam - assert_negative_support(phi, 'phi', 'Wald') - assert_negative_support(mu, 'mu', 'Wald') - assert_negative_support(lam, 'lam', 'Wald') + assert_negative_support(phi, "phi", "Wald") + assert_negative_support(mu, "mu", "Wald") + assert_negative_support(lam, "lam", "Wald") def get_mu_lam_phi(self, mu, lam, phi): if mu is None: @@ -1043,23 +1100,28 @@ def get_mu_lam_phi(self, mu, lam, phi): else: if lam is None: if phi is None: - return mu, 1., 1. / mu + return mu, 1.0, 1.0 / mu else: return mu, mu * phi, phi else: if phi is None: return mu, lam, lam / mu - raise ValueError('Wald distribution must specify either mu only, ' - 'mu and lam, mu and phi, or lam and phi.') + raise ValueError( + "Wald distribution must specify either mu only, " + "mu and lam, mu and phi, or lam and phi." + ) def _random(self, mu, lam, alpha, size=None): - v = np.random.normal(size=size)**2 - value = (mu + (mu**2) * v / (2. * lam) - mu / (2. * lam) - * np.sqrt(4. * mu * lam * v + (mu * v)**2)) + v = np.random.normal(size=size) ** 2 + value = ( + mu + + (mu ** 2) * v / (2.0 * lam) + - mu / (2.0 * lam) * np.sqrt(4.0 * mu * lam * v + (mu * v) ** 2) + ) z = np.random.uniform(size=size) i = np.floor(z - mu / (mu + value)) * 2 + 1 - value = (value**-i) * (mu**(i + 1)) + value = (value ** -i) * (mu ** (i + 1)) return value + alpha def random(self, point=None, size=None): @@ -1079,12 +1141,12 @@ def random(self, point=None, size=None): ------- array """ - mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha], - point=point, size=size) - return generate_samples(self._random, - mu, lam, alpha, - dist_shape=self.shape, - size=size) + mu, lam, alpha = draw_values( + [self.mu, self.lam, self.alpha], point=point, size=size + ) + return generate_samples( + self._random, mu, lam, alpha, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1104,13 +1166,17 @@ def logp(self, value): lam = self.lam alpha = self.alpha # value *must* be iid. Otherwise this is wrong. - return bound(logpow(lam / (2. * np.pi), 0.5) - - logpow(value - alpha, 1.5) - - (0.5 * lam / (value - alpha) - * ((value - alpha - mu) / mu)**2), - # XXX these two are redundant. Please, check. - value > 0, value - alpha > 0, - mu > 0, lam > 0, alpha >= 0) + return bound( + logpow(lam / (2.0 * np.pi), 0.5) + - logpow(value - alpha, 1.5) + - (0.5 * lam / (value - alpha) * ((value - alpha - mu) / mu) ** 2), + # XXX these two are redundant. Please, check. 
+ value > 0, + value - alpha > 0, + mu > 0, + lam > 0, + alpha >= 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -1118,11 +1184,13 @@ def _repr_latex_(self, name=None, dist=None): lam = dist.lam mu = dist.mu alpha = dist.alpha - name = r'\text{%s}' % name - return r'${} \sim \text{{Wald}}(\mathit{{mu}}={},~\mathit{{lam}}={},~\mathit{{alpha}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(lam), - get_variable_name(alpha)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Wald}}(\mathit{{mu}}={},~\mathit{{lam}}={},~\mathit{{alpha}}={})$".format( + name, + get_variable_name(mu), + get_variable_name(lam), + get_variable_name(alpha), + ) def logcdf(self, value): """ @@ -1149,38 +1217,35 @@ def logcdf(self, value): l = lam * mu r = tt.sqrt(value * lam) - a = normal_lcdf(0, 1, (q - 1.)/r) - b = 2./l + normal_lcdf(0, 1, -(q + 1.)/r) + a = normal_lcdf(0, 1, (q - 1.0) / r) + b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r) return tt.switch( ( # Left limit - tt.lt(value, 0) | - (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) | - (tt.lt(value, mu) & tt.eq(lam, 0)) + tt.lt(value, 0) + | (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) + | (tt.lt(value, mu) & tt.eq(lam, 0)) ), -np.inf, tt.switch( ( # Right limit - tt.eq(value, np.inf) | - (tt.eq(lam, 0) & tt.gt(value, mu)) | - (tt.gt(value, 0) & tt.eq(lam, np.inf)) | + tt.eq(value, np.inf) + | (tt.eq(lam, 0) & tt.gt(value, mu)) + | (tt.gt(value, 0) & tt.eq(lam, np.inf)) + | # Degenerate distribution - ( - tt.lt(mu, np.inf) & - tt.eq(mu, value) & - tt.eq(lam, 0) - ) | - (tt.eq(value, 0) & tt.eq(lam, np.inf)) + (tt.lt(mu, np.inf) & tt.eq(mu, value) & tt.eq(lam, 0)) + | (tt.eq(value, 0) & tt.eq(lam, np.inf)) ), 0, - a + tt.log1p(tt.exp(b - a)) - ) + a + tt.log1p(tt.exp(b - a)), + ), ) class Beta(UnitContinuous): - R""" + r""" Beta log-likelihood. The pdf of this distribution is @@ -1242,36 +1307,39 @@ class Beta(UnitContinuous): the binomial distribution. """ - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, - sd=None, *args, **kwargs): + def __init__( + self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs + ): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = self.alpha / (self.alpha + self.beta) - self.variance = self.alpha * self.beta / ( - (self.alpha + self.beta)**2 * (self.alpha + self.beta + 1)) + self.variance = ( + self.alpha + * self.beta + / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1)) + ) - assert_negative_support(alpha, 'alpha', 'Beta') - assert_negative_support(beta, 'beta', 'Beta') + assert_negative_support(alpha, "alpha", "Beta") + assert_negative_support(beta, "beta", "Beta") def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None): if (alpha is not None) and (beta is not None): pass elif (mu is not None) and (sigma is not None): - kappa = mu * (1 - mu) / sigma**2 - 1 + kappa = mu * (1 - mu) / sigma ** 2 - 1 alpha = mu * kappa beta = (1 - mu) * kappa else: - raise ValueError('Incompatible parameterization. Either use alpha ' - 'and beta, or mu and sigma to specify distribution.') + raise ValueError( + "Incompatible parameterization. 
Either use alpha " + "and beta, or mu and sigma to specify distribution." + ) return alpha, beta @@ -1292,11 +1360,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(clipped_beta_rvs, alpha, beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + clipped_beta_rvs, alpha, beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1317,13 +1384,13 @@ def logp(self, value): logval = tt.log(value) log1pval = tt.log1p(-value) - logp = (tt.switch(tt.eq(alpha, 1), 0, (alpha - 1) * logval) - + tt.switch(tt.eq(beta, 1), 0, (beta - 1) * log1pval) - - betaln(alpha, beta)) + logp = ( + tt.switch(tt.eq(alpha, 1), 0, (alpha - 1) * logval) + + tt.switch(tt.eq(beta, 1), 0, (beta - 1) * log1pval) + - betaln(alpha, beta) + ) - return bound(logp, - value >= 0, value <= 1, - alpha > 0, beta > 0) + return bound(logp, value >= 0, value <= 1, alpha > 0, beta > 0) def logcdf(self, value): """ @@ -1346,11 +1413,7 @@ def logcdf(self, value): return tt.switch( tt.le(value, 0), -np.inf, - tt.switch( - tt.ge(value, 1), - 0, - tt.log(incomplete_beta(a, b, value)) - ) + tt.switch(tt.ge(value, 1), 0, tt.log(incomplete_beta(a, b, value))), ) def _repr_latex_(self, name=None, dist=None): @@ -1358,13 +1421,14 @@ def _repr_latex_(self, name=None, dist=None): dist = self alpha = dist.alpha beta = dist.beta - name = r'\text{%s}' % name - return r'${} \sim \text{{Beta}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Beta}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(alpha), get_variable_name(beta) + ) + class Kumaraswamy(UnitContinuous): - R""" + r""" Kumaraswamy log-likelihood. 
The pdf of this distribution is @@ -1411,13 +1475,23 @@ def __init__(self, a, b, *args, **kwargs): self.a = a = tt.as_tensor_variable(floatX(a)) self.b = b = tt.as_tensor_variable(floatX(b)) - ln_mean = tt.log(b) + tt.gammaln(1 + 1 / a) + tt.gammaln(b) - tt.gammaln(1 + 1 / a + b) + ln_mean = ( + tt.log(b) + + tt.gammaln(1 + 1 / a) + + tt.gammaln(b) + - tt.gammaln(1 + 1 / a + b) + ) self.mean = tt.exp(ln_mean) - ln_2nd_raw_moment = tt.log(b) + tt.gammaln(1 + 2 / a) + tt.gammaln(b) - tt.gammaln(1 + 2 / a + b) + ln_2nd_raw_moment = ( + tt.log(b) + + tt.gammaln(1 + 2 / a) + + tt.gammaln(b) + - tt.gammaln(1 + 2 / a + b) + ) self.variance = tt.exp(ln_2nd_raw_moment) - self.mean ** 2 - assert_negative_support(a, 'a', 'Kumaraswamy') - assert_negative_support(b, 'b', 'Kumaraswamy') + assert_negative_support(a, "a", "Kumaraswamy") + assert_negative_support(b, "b", "Kumaraswamy") def _random(self, a, b, size=None): u = np.random.uniform(size=size) @@ -1440,11 +1514,8 @@ def random(self, point=None, size=None): ------- array """ - a, b = draw_values([self.a, self.b], - point=point, size=size) - return generate_samples(self._random, a, b, - dist_shape=self.shape, - size=size) + a, b = draw_values([self.a, self.b], point=point, size=size) + return generate_samples(self._random, a, b, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1463,25 +1534,28 @@ def logp(self, value): a = self.a b = self.b - logp = tt.log(a) + tt.log(b) + (a - 1) * tt.log(value) + (b - 1) * tt.log(1 - value ** a) + logp = ( + tt.log(a) + + tt.log(b) + + (a - 1) * tt.log(value) + + (b - 1) * tt.log(1 - value ** a) + ) - return bound(logp, - value >= 0, value <= 1, - a > 0, b > 0) + return bound(logp, value >= 0, value <= 1, a > 0, b > 0) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self a = dist.a b = dist.b - name = r'\text{%s}' % name - return r'${} \sim \text{{Kumaraswamy}}(\mathit{{a}}={},~\mathit{{b}}={})$'.format(name, - get_variable_name(a), - get_variable_name(b)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Kumaraswamy}}(\mathit{{a}}={},~\mathit{{b}}={})$".format( + name, get_variable_name(a), get_variable_name(b) + ) class Exponential(PositiveContinuous): - R""" + r""" Exponential log-likelihood. The pdf of this distribution is @@ -1520,13 +1594,13 @@ class Exponential(PositiveContinuous): def __init__(self, lam, *args, **kwargs): super().__init__(*args, **kwargs) self.lam = lam = tt.as_tensor_variable(floatX(lam)) - self.mean = 1. / self.lam + self.mean = 1.0 / self.lam self.median = self.mean * tt.log(2) self.mode = tt.zeros_like(self.lam) - self.variance = self.lam**-2 + self.variance = self.lam ** -2 - assert_negative_support(lam, 'lam', 'Exponential') + assert_negative_support(lam, "lam", "Exponential") def random(self, point=None, size=None): """ @@ -1546,9 +1620,9 @@ def random(self, point=None, size=None): array """ lam = draw_values([self.lam], point=point, size=size)[0] - return generate_samples(np.random.exponential, scale=1. 
/ lam, - dist_shape=self.shape, - size=size) + return generate_samples( + np.random.exponential, scale=1.0 / lam, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1571,9 +1645,10 @@ def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self lam = dist.lam - name = r'\text{%s}' % name - return r'${} \sim \text{{Exponential}}(\mathit{{lam}}={})$'.format(name, - get_variable_name(lam)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Exponential}}(\mathit{{lam}}={})$".format( + name, get_variable_name(lam) + ) def logcdf(self, value): r""" @@ -1603,15 +1678,13 @@ def logcdf(self, value): tt.le(value, 0.0), -np.inf, tt.switch( - tt.le(a, tt.log(2.0)), - tt.log(-tt.expm1(-a)), - tt.log1p(-tt.exp(-a)), - ) + tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a)) + ), ) class Laplace(Continuous): - R""" + r""" Laplace log-likelihood. The pdf of this distribution is @@ -1655,11 +1728,13 @@ class Laplace(Continuous): def __init__(self, mu, b, *args, **kwargs): super().__init__(*args, **kwargs) self.b = b = tt.as_tensor_variable(floatX(b)) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( + floatX(mu) + ) - self.variance = 2 * self.b**2 + self.variance = 2 * self.b ** 2 - assert_negative_support(b, 'b', 'Laplace') + assert_negative_support(b, "b", "Laplace") def random(self, point=None, size=None): """ @@ -1679,9 +1754,9 @@ def random(self, point=None, size=None): array """ mu, b = draw_values([self.mu, self.b], point=point, size=size) - return generate_samples(np.random.laplace, mu, b, - dist_shape=self.shape, - size=size) + return generate_samples( + np.random.laplace, mu, b, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -1707,10 +1782,10 @@ def _repr_latex_(self, name=None, dist=None): dist = self b = dist.b mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{Laplace}}(\mathit{{mu}}={},~\mathit{{b}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(b)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Laplace}}(\mathit{{mu}}={},~\mathit{{b}}={})$".format( + name, get_variable_name(mu), get_variable_name(b) + ) def logcdf(self, value): """ @@ -1734,15 +1809,13 @@ def logcdf(self, value): tt.le(value, a), tt.log(0.5) + y, tt.switch( - tt.gt(y, 1), - tt.log1p(-0.5 * tt.exp(-y)), - tt.log(1 - 0.5 * tt.exp(-y)) - ) + tt.gt(y, 1), tt.log1p(-0.5 * tt.exp(-y)), tt.log(1 - 0.5 * tt.exp(-y)) + ), ) class Lognormal(PositiveContinuous): - R""" + r""" Log-normal log-likelihood. Distribution of any random variable whose logarithm is normally @@ -1807,10 +1880,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) @@ -1818,17 +1888,19 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs): self.tau = tau = tt.as_tensor_variable(tau) self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma) - self.mean = tt.exp(self.mu + 1. / (2 * self.tau)) + self.mean = tt.exp(self.mu + 1.0 / (2 * self.tau)) self.median = tt.exp(self.mu) - self.mode = tt.exp(self.mu - 1. / self.tau) - self.variance = (tt.exp(1. / self.tau) - 1) * tt.exp(2 * self.mu + 1. 
/ self.tau) + self.mode = tt.exp(self.mu - 1.0 / self.tau) + self.variance = (tt.exp(1.0 / self.tau) - 1) * tt.exp( + 2 * self.mu + 1.0 / self.tau + ) - assert_negative_support(tau, 'tau', 'Lognormal') - assert_negative_support(sigma, 'sigma', 'Lognormal') + assert_negative_support(tau, "tau", "Lognormal") + assert_negative_support(sigma, "sigma", "Lognormal") def _random(self, mu, tau, size=None): samples = np.random.normal(size=size) - return np.exp(mu + (tau**-0.5) * samples) + return np.exp(mu + (tau ** -0.5) * samples) def random(self, point=None, size=None): """ @@ -1848,9 +1920,7 @@ def random(self, point=None, size=None): array """ mu, tau = draw_values([self.mu, self.tau], point=point, size=size) - return generate_samples(self._random, mu, tau, - dist_shape=self.shape, - size=size) + return generate_samples(self._random, mu, tau, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1868,20 +1938,22 @@ def logp(self, value): """ mu = self.mu tau = self.tau - return bound(-0.5 * tau * (tt.log(value) - mu)**2 - + 0.5 * tt.log(tau / (2. * np.pi)) - - tt.log(value), - tau > 0) + return bound( + -0.5 * tau * (tt.log(value) - mu) ** 2 + + 0.5 * tt.log(tau / (2.0 * np.pi)) + - tt.log(value), + tau > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self tau = dist.tau mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{Lognormal}}(\mathit{{mu}}={},~\mathit{{tau}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(tau)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Lognormal}}(\mathit{{mu}}={},~\mathit{{tau}}={})$".format( + name, get_variable_name(mu), get_variable_name(tau) + ) def logcdf(self, value): """ @@ -1907,15 +1979,14 @@ def logcdf(self, value): -np.inf, tt.switch( tt.lt(z, -1.0), - tt.log(tt.erfcx(-z / tt.sqrt(2.)) / 2.) - - tt.sqr(z) / 2, - tt.log1p(-tt.erfc(z / tt.sqrt(2.)) / 2.) - ) + tt.log(tt.erfcx(-z / tt.sqrt(2.0)) / 2.0) - tt.sqr(z) / 2, + tt.log1p(-tt.erfc(z / tt.sqrt(2.0)) / 2.0), + ), ) class StudentT(Continuous): - R""" + r""" Student's T log-likelihood. Describes a normal variable whose precision is gamma distributed. 
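
(Note on the StudentT docstring above: the statement that it "describes a
normal variable whose precision is gamma distributed" can be checked
numerically. A minimal sketch, not part of the patch, assuming NumPy and
SciPy are available; nu, mu and sigma are illustrative values.)

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(42)
    nu, mu, sigma = 5.0, 0.0, 1.0

    # Precision tau ~ Gamma(nu / 2, rate=nu / 2), i.e. scale = 2 / nu,
    # then x | tau ~ Normal(mu, sigma / sqrt(tau)).
    tau = rng.gamma(shape=nu / 2.0, scale=2.0 / nu, size=200_000)
    x = rng.normal(mu, sigma / np.sqrt(tau))

    # Marginally x should follow StudentT(nu, loc=mu, scale=sigma);
    # the Kolmogorov-Smirnov statistic should be close to zero.
    print(stats.kstest(x, stats.t(df=nu, loc=mu, scale=sigma).cdf))
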
@@ -1982,22 +2053,19 @@ def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs): super(StudentT, self).__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.nu = nu = tt.as_tensor_variable(floatX(nu)) lam, sigma = get_tau_sigma(tau=lam, sigma=sigma) self.lam = lam = tt.as_tensor_variable(lam) self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu) - self.variance = tt.switch((nu > 2) * 1, - (1 / self.lam) * (nu / (nu - 2)), - np.inf) + self.variance = tt.switch( + (nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf + ) - assert_negative_support(lam, 'lam (sigma)', 'StudentT') - assert_negative_support(nu, 'nu', 'StudentT') + assert_negative_support(lam, "lam (sigma)", "StudentT") + assert_negative_support(nu, "nu", "StudentT") def random(self, point=None, size=None): """ @@ -2016,11 +2084,10 @@ def random(self, point=None, size=None): ------- array """ - nu, mu, lam = draw_values([self.nu, self.mu, self.lam], - point=point, size=size) - return generate_samples(stats.t.rvs, nu, loc=mu, scale=lam**-0.5, - dist_shape=self.shape, - size=size) + nu, mu, lam = draw_values([self.nu, self.mu, self.lam], point=point, size=size) + return generate_samples( + stats.t.rvs, nu, loc=mu, scale=lam ** -0.5, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2041,11 +2108,15 @@ def logp(self, value): lam = self.lam sigma = self.sigma - return bound(gammaln((nu + 1.0) / 2.0) - + .5 * tt.log(lam / (nu * np.pi)) - - gammaln(nu / 2.0) - - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu)**2 / nu), - lam > 0, nu > 0, sigma > 0) + return bound( + gammaln((nu + 1.0) / 2.0) + + 0.5 * tt.log(lam / (nu * np.pi)) + - gammaln(nu / 2.0) + - (nu + 1.0) / 2.0 * tt.log1p(lam * (value - mu) ** 2 / nu), + lam > 0, + nu > 0, + sigma > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -2053,11 +2124,10 @@ def _repr_latex_(self, name=None, dist=None): nu = dist.nu mu = dist.mu lam = dist.lam - name = r'\text{%s}' % name - return r'${} \sim \text{{StudentT}}(\mathit{{nu}}={},~\mathit{{mu}}={},~\mathit{{lam}}={})$'.format(name, - get_variable_name(nu), - get_variable_name(mu), - get_variable_name(lam)) + name = r"\text{%s}" % name + return r"${} \sim \text{{StudentT}}(\mathit{{nu}}={},~\mathit{{mu}}={},~\mathit{{lam}}={})$".format( + name, get_variable_name(nu), get_variable_name(mu), get_variable_name(lam) + ) def logcdf(self, value): """ @@ -2077,14 +2147,14 @@ def logcdf(self, value): nu = self.nu mu = self.mu sigma = self.sigma - t = (value - mu)/sigma - sqrt_t2_nu = tt.sqrt(t**2 + nu) - x = (t + sqrt_t2_nu)/(2.0 * sqrt_t2_nu) - return tt.log(incomplete_beta(nu/2., nu/2., x)) + t = (value - mu) / sigma + sqrt_t2_nu = tt.sqrt(t ** 2 + nu) + x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu) + return tt.log(incomplete_beta(nu / 2.0, nu / 2.0, x)) class Pareto(Continuous): - R""" + r""" Pareto log-likelihood. Often used to characterize wealth distribution, or other examples of the @@ -2128,28 +2198,28 @@ class Pareto(Continuous): Scale parameter (m > 0). 
""" - def __init__(self, alpha, m, transform='lowerbound', *args, **kwargs): + def __init__(self, alpha, m, transform="lowerbound", *args, **kwargs): self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.m = m = tt.as_tensor_variable(floatX(m)) - self.mean = tt.switch(tt.gt(alpha, 1), alpha * - m / (alpha - 1.), np.inf) - self.median = m * 2.**(1. / alpha) + self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf) + self.median = m * 2.0 ** (1.0 / alpha) self.variance = tt.switch( tt.gt(alpha, 2), - (alpha * m**2) / ((alpha - 2.) * (alpha - 1.)**2), - np.inf) + (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), + np.inf, + ) - assert_negative_support(alpha, 'alpha', 'Pareto') - assert_negative_support(m, 'm', 'Pareto') + assert_negative_support(alpha, "alpha", "Pareto") + assert_negative_support(m, "m", "Pareto") - if transform == 'lowerbound': + if transform == "lowerbound": transform = transforms.lowerbound(self.m) super().__init__(transform=transform, *args, **kwargs) def _random(self, alpha, m, size=None): u = np.random.uniform(size=size) - return m * (1. - u)**(-1. / alpha) + return m * (1.0 - u) ** (-1.0 / alpha) def random(self, point=None, size=None): """ @@ -2168,11 +2238,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, m = draw_values([self.alpha, self.m], - point=point, size=size) - return generate_samples(self._random, alpha, m, - dist_shape=self.shape, - size=size) + alpha, m = draw_values([self.alpha, self.m], point=point, size=size) + return generate_samples( + self._random, alpha, m, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2190,19 +2259,22 @@ def logp(self, value): """ alpha = self.alpha m = self.m - return bound(tt.log(alpha) + logpow(m, alpha) - - logpow(value, alpha + 1), - value >= m, alpha > 0, m > 0) + return bound( + tt.log(alpha) + logpow(m, alpha) - logpow(value, alpha + 1), + value >= m, + alpha > 0, + m > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self alpha = dist.alpha m = dist.m - name = r'\text{%s}' % name - return r'${} \sim \text{{Pareto}}(\mathit{{alpha}}={},~\mathit{{m}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(m)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Pareto}}(\mathit{{alpha}}={},~\mathit{{m}}={})$".format( + name, get_variable_name(alpha), get_variable_name(m) + ) def logcdf(self, value): """ @@ -2225,16 +2297,12 @@ def logcdf(self, value): return tt.switch( tt.lt(value, m), -np.inf, - tt.switch( - tt.le(arg, 1e-5), - tt.log1p(-arg), - tt.log(1 - arg) - ) + tt.switch(tt.le(arg, 1e-5), tt.log1p(-arg), tt.log(1 - arg)), ) class Cauchy(Continuous): - R""" + r""" Cauchy log-likelihood. Also known as the Lorentz or the Breit-Wigner distribution. 
@@ -2283,7 +2351,7 @@ def __init__(self, alpha, beta, *args, **kwargs): self.median = self.mode = self.alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'Cauchy') + assert_negative_support(beta, "beta", "Cauchy") def _random(self, alpha, beta, size=None): u = np.random.uniform(size=size) @@ -2306,11 +2374,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(self._random, alpha, beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + self._random, alpha, beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2328,19 +2395,20 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(- tt.log(np.pi) - tt.log(beta) - - tt.log1p(((value - alpha) / beta)**2), - beta > 0) + return bound( + -tt.log(np.pi) - tt.log(beta) - tt.log1p(((value - alpha) / beta) ** 2), + beta > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self alpha = dist.alpha beta = dist.beta - name = r'\text{%s}' % name - return r'${} \sim \text{{Cauchy}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Cauchy}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(alpha), get_variable_name(beta) + ) def logcdf(self, value): """ @@ -2357,13 +2425,11 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.log( - 0.5 + tt.arctan((value - self.alpha) / self.beta) / np.pi - ) + return tt.log(0.5 + tt.arctan((value - self.alpha) / self.beta) / np.pi) class HalfCauchy(PositiveContinuous): - R""" + r""" Half-Cauchy log-likelihood. 
The pdf of this distribution is @@ -2405,7 +2471,7 @@ def __init__(self, beta, *args, **kwargs): self.mode = tt.as_tensor_variable(0) self.median = self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'HalfCauchy') + assert_negative_support(beta, "beta", "HalfCauchy") def _random(self, beta, size=None): u = np.random.uniform(size=size) @@ -2429,9 +2495,7 @@ def random(self, point=None, size=None): array """ beta = draw_values([self.beta], point=point, size=size)[0] - return generate_samples(self._random, beta, - dist_shape=self.shape, - size=size) + return generate_samples(self._random, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2448,17 +2512,20 @@ def logp(self, value): TensorVariable """ beta = self.beta - return bound(tt.log(2) - tt.log(np.pi) - tt.log(beta) - - tt.log1p((value / beta)**2), - value >= 0, beta > 0) + return bound( + tt.log(2) - tt.log(np.pi) - tt.log(beta) - tt.log1p((value / beta) ** 2), + value >= 0, + beta > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self beta = dist.beta - name = r'\text{%s}' % name - return r'${} \sim \text{{HalfCauchy}}(\mathit{{beta}}={})$'.format(name, - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{HalfCauchy}}(\mathit{{beta}}={})$".format( + name, get_variable_name(beta) + ) def logcdf(self, value): """ @@ -2476,15 +2543,12 @@ def logcdf(self, value): TensorVariable """ return tt.switch( - tt.le(value, 0), - -np.inf, - tt.log( - 2 * tt.arctan(value / self.beta) / np.pi - )) + tt.le(value, 0), -np.inf, tt.log(2 * tt.arctan(value / self.beta) / np.pi) + ) class Gamma(PositiveContinuous): - R""" + r""" Gamma log-likelihood. Represents the sum of alpha exponentially distributed random variables, @@ -2541,36 +2605,36 @@ class Gamma(PositiveContinuous): Alternative scale parameter (sigma > 0). """ - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, - sd=None, *args, **kwargs): + def __init__( + self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs + ): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = self.get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = alpha / beta self.mode = tt.maximum((alpha - 1) / beta, 0) - self.variance = alpha / beta**2 + self.variance = alpha / beta ** 2 - assert_negative_support(alpha, 'alpha', 'Gamma') - assert_negative_support(beta, 'beta', 'Gamma') + assert_negative_support(alpha, "alpha", "Gamma") + assert_negative_support(beta, "beta", "Gamma") def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None): if (alpha is not None) and (beta is not None): pass elif (mu is not None) and (sigma is not None): - alpha = mu**2 / sigma**2 - beta = mu / sigma**2 + alpha = mu ** 2 / sigma ** 2 + beta = mu / sigma ** 2 else: - raise ValueError('Incompatible parameterization. Either use ' - 'alpha and beta, or mu and sigma to specify ' - 'distribution.') + raise ValueError( + "Incompatible parameterization. Either use " + "alpha and beta, or mu and sigma to specify " + "distribution." 
+ ) return alpha, beta @@ -2591,11 +2655,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(stats.gamma.rvs, alpha, scale=1. / beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + stats.gamma.rvs, alpha, scale=1.0 / beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2614,11 +2677,14 @@ def logp(self, value): alpha = self.alpha beta = self.beta return bound( - -gammaln(alpha) + logpow( - beta, alpha) - beta * value + logpow(value, alpha - 1), + -gammaln(alpha) + + logpow(beta, alpha) + - beta * value + + logpow(value, alpha - 1), value >= 0, alpha > 0, - beta > 0) + beta > 0, + ) def logcdf(self, value): """ @@ -2638,24 +2704,22 @@ def logcdf(self, value): alpha = self.alpha beta = self.beta return bound( - tt.log(tt.gammainc(alpha, beta * value)), - value >= 0, - alpha > 0, - beta > 0) + tt.log(tt.gammainc(alpha, beta * value)), value >= 0, alpha > 0, beta > 0 + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self beta = dist.beta alpha = dist.alpha - name = r'\text{%s}' % name - return r'${} \sim \text{{Gamma}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Gamma}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(alpha), get_variable_name(beta) + ) class InverseGamma(PositiveContinuous): - R""" + r""" Inverse gamma log-likelihood, the reciprocal of the gamma distribution. The pdf of this distribution is @@ -2702,31 +2766,29 @@ class InverseGamma(PositiveContinuous): Alternative scale parameter (sigma > 0). """ - def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, - *args, **kwargs): - super().__init__(*args, defaults=('mode',), **kwargs) + def __init__( + self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs + ): + super().__init__(*args, defaults=("mode",), **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) alpha, beta = InverseGamma._get_alpha_beta(alpha, beta, mu, sigma) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) self.mean = self._calculate_mean() - self.mode = beta / (alpha + 1.) - self.variance = tt.switch(tt.gt(alpha, 2), - (beta**2) / ((alpha - 2) * (alpha - 1.)**2), - np.inf) - assert_negative_support(alpha, 'alpha', 'InverseGamma') - assert_negative_support(beta, 'beta', 'InverseGamma') + self.mode = beta / (alpha + 1.0) + self.variance = tt.switch( + tt.gt(alpha, 2), (beta ** 2) / ((alpha - 2) * (alpha - 1.0) ** 2), np.inf + ) + assert_negative_support(alpha, "alpha", "InverseGamma") + assert_negative_support(beta, "beta", "InverseGamma") def _calculate_mean(self): - m = self.beta / (self.alpha - 1.) 
+ m = self.beta / (self.alpha - 1.0) try: return (self.alpha > 1) * m or np.inf except ValueError: # alpha is an array @@ -2735,18 +2797,20 @@ def _calculate_mean(self): @staticmethod def _get_alpha_beta(alpha, beta, mu, sigma): - if (alpha is not None): - if (beta is not None): + if alpha is not None: + if beta is not None: pass else: beta = 1 elif (mu is not None) and (sigma is not None): - alpha = (2 * sigma**2 + mu**2)/sigma**2 - beta = mu * (mu**2 + sigma**2) / sigma**2 + alpha = (2 * sigma ** 2 + mu ** 2) / sigma ** 2 + beta = mu * (mu ** 2 + sigma ** 2) / sigma ** 2 else: - raise ValueError('Incompatible parameterization. Either use ' - 'alpha and (optionally) beta, or mu and sigma to specify ' - 'distribution.') + raise ValueError( + "Incompatible parameterization. Either use " + "alpha and (optionally) beta, or mu and sigma to specify " + "distribution." + ) return alpha, beta @@ -2767,11 +2831,10 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) - return generate_samples(stats.invgamma.rvs, a=alpha, scale=beta, - dist_shape=self.shape, - size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) + return generate_samples( + stats.invgamma.rvs, a=alpha, scale=beta, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -2789,23 +2852,29 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(logpow(beta, alpha) - gammaln(alpha) - beta / value - + logpow(value, -alpha - 1), - value > 0, alpha > 0, beta > 0) + return bound( + logpow(beta, alpha) + - gammaln(alpha) + - beta / value + + logpow(value, -alpha - 1), + value > 0, + alpha > 0, + beta > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self beta = dist.beta alpha = dist.alpha - name = r'\text{%s}' % name - return r'${} \sim \text{{InverseGamma}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{InverseGamma}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(alpha), get_variable_name(beta) + ) class ChiSquared(Gamma): - R""" + r""" :math:`\chi^2` log-likelihood. The pdf of this distribution is @@ -2844,19 +2913,18 @@ class ChiSquared(Gamma): def __init__(self, nu, *args, **kwargs): self.nu = nu = tt.as_tensor_variable(floatX(nu)) - super().__init__(alpha=nu / 2., beta=0.5, *args, **kwargs) + super().__init__(alpha=nu / 2.0, beta=0.5, *args, **kwargs) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self nu = dist.nu - name = r'\text{%s}' % name - return r'${} \sim \Chi^2(\mathit{{nu}}={})$'.format(name, - get_variable_name(nu)) + name = r"\text{%s}" % name + return r"${} \sim \Chi^2(\mathit{{nu}}={})$".format(name, get_variable_name(nu)) class Weibull(PositiveContinuous): - R""" + r""" Weibull log-likelihood. The pdf of this distribution is @@ -2903,15 +2971,15 @@ def __init__(self, alpha, beta, *args, **kwargs): super().__init__(*args, **kwargs) self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) self.beta = beta = tt.as_tensor_variable(floatX(beta)) - self.mean = beta * tt.exp(gammaln(1 + 1. / alpha)) - self.median = beta * tt.exp(gammaln(tt.log(2)))**(1. / alpha) - self.variance = beta**2 * tt.exp(gammaln(1 + 2. 
/ alpha)) - self.mean**2 - self.mode = tt.switch(alpha >= 1, - beta * ((alpha - 1)/alpha) ** (1 / alpha), - 0) # Reference: https://en.wikipedia.org/wiki/Weibull_distribution + self.mean = beta * tt.exp(gammaln(1 + 1.0 / alpha)) + self.median = beta * tt.exp(gammaln(tt.log(2))) ** (1.0 / alpha) + self.variance = beta ** 2 * tt.exp(gammaln(1 + 2.0 / alpha)) - self.mean ** 2 + self.mode = tt.switch( + alpha >= 1, beta * ((alpha - 1) / alpha) ** (1 / alpha), 0 + ) # Reference: https://en.wikipedia.org/wiki/Weibull_distribution - assert_negative_support(alpha, 'alpha', 'Weibull') - assert_negative_support(beta, 'beta', 'Weibull') + assert_negative_support(alpha, "alpha", "Weibull") + assert_negative_support(beta, "beta", "Weibull") def random(self, point=None, size=None): """ @@ -2930,15 +2998,12 @@ def random(self, point=None, size=None): ------- array """ - alpha, beta = draw_values([self.alpha, self.beta], - point=point, size=size) + alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) def _random(a, b, size=None): - return b * (-np.log(np.random.uniform(size=size)))**(1 / a) + return b * (-np.log(np.random.uniform(size=size))) ** (1 / a) - return generate_samples(_random, alpha, beta, - dist_shape=self.shape, - size=size) + return generate_samples(_random, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2956,20 +3021,25 @@ def logp(self, value): """ alpha = self.alpha beta = self.beta - return bound(tt.log(alpha) - tt.log(beta) - + (alpha - 1) * tt.log(value / beta) - - (value / beta)**alpha, - value >= 0, alpha > 0, beta > 0) + return bound( + tt.log(alpha) + - tt.log(beta) + + (alpha - 1) * tt.log(value / beta) + - (value / beta) ** alpha, + value >= 0, + alpha > 0, + beta > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self beta = dist.beta alpha = dist.alpha - name = r'\text{%s}' % name - return r'${} \sim \text{{Weibull}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(alpha), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Weibull}}(\mathit{{alpha}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(alpha), get_variable_name(beta) + ) def logcdf(self, value): r""" @@ -2994,19 +3064,18 @@ def logcdf(self, value): """ alpha = self.alpha beta = self.beta - a = (value / beta)**alpha + a = (value / beta) ** alpha return tt.switch( tt.le(value, 0.0), -np.inf, tt.switch( - tt.le(a, tt.log(2.0)), - tt.log(-tt.expm1(-a)), - tt.log1p(-tt.exp(-a))) + tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a)) + ), ) class HalfStudentT(PositiveContinuous): - R""" + r""" Half Student's T log-likelihood The pdf of this distribution is @@ -3062,15 +3131,11 @@ class HalfStudentT(PositiveContinuous): x = pm.HalfStudentT('x', lam=4, nu=10) """ - def __init__(self, nu=1, sigma=None, lam=None, sd=None, - *args, **kwargs): + def __init__(self, nu=1, sigma=None, lam=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mode = tt.as_tensor_variable(0) lam, sigma = get_tau_sigma(lam, sigma) @@ -3079,9 +3144,9 @@ def __init__(self, nu=1, sigma=None, lam=None, sd=None, self.lam = tt.as_tensor_variable(lam) self.nu = nu = tt.as_tensor_variable(floatX(nu)) - assert_negative_support(sigma, 'sigma', 'HalfStudentT') - assert_negative_support(lam, 
'lam', 'HalfStudentT') - assert_negative_support(nu, 'nu', 'HalfStudentT') + assert_negative_support(sigma, "sigma", "HalfStudentT") + assert_negative_support(lam, "lam", "HalfStudentT") + assert_negative_support(nu, "nu", "HalfStudentT") def random(self, point=None, size=None): """ @@ -3101,9 +3166,11 @@ def random(self, point=None, size=None): array """ nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size) - return np.abs(generate_samples(stats.t.rvs, nu, loc=0, scale=sigma, - dist_shape=self.shape, - size=size)) + return np.abs( + generate_samples( + stats.t.rvs, nu, loc=0, scale=sigma, dist_shape=self.shape, size=size + ) + ) def logp(self, value): """ @@ -3123,25 +3190,31 @@ def logp(self, value): sigma = self.sigma lam = self.lam - return bound(tt.log(2) + gammaln((nu + 1.0) / 2.0) - - gammaln(nu / 2.0) - - .5 * tt.log(nu * np.pi * sigma**2) - - (nu + 1.0) / 2.0 * tt.log1p(value ** 2 / (nu * sigma**2)), - sigma > 0, lam > 0, nu > 0, value >= 0) + return bound( + tt.log(2) + + gammaln((nu + 1.0) / 2.0) + - gammaln(nu / 2.0) + - 0.5 * tt.log(nu * np.pi * sigma ** 2) + - (nu + 1.0) / 2.0 * tt.log1p(value ** 2 / (nu * sigma ** 2)), + sigma > 0, + lam > 0, + nu > 0, + value >= 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self nu = dist.nu sigma = dist.sigma - name = r'\text{%s}' % name - return r'${} \sim \text{{HalfStudentT}}(\mathit{{nu}}={},~\mathit{{sigma}}={})$'.format(name, - get_variable_name(nu), - get_variable_name(sigma)) + name = r"\text{%s}" % name + return r"${} \sim \text{{HalfStudentT}}(\mathit{{nu}}={},~\mathit{{sigma}}={})$".format( + name, get_variable_name(nu), get_variable_name(sigma) + ) class ExGaussian(Continuous): - R""" + r""" Exponentially modified Gaussian log-likelihood. Results from the convolution of a normal distribution with an exponential @@ -3205,25 +3278,21 @@ class ExGaussian(Continuous): Vol. 4, No. 1, pp 35-45. 
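    Examples
    --------
    A minimal usage sketch (the parameter values below are illustrative
    only, not defaults):

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            # Gaussian component with mean mu and spread sigma,
            # convolved with an exponential component of mean nu
            x = pm.ExGaussian('x', mu=0.0, sigma=1.0, nu=1.0)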
""" - def __init__(self, mu=0., sigma=None, nu=None, sd=None, - *args, **kwargs): + def __init__(self, mu=0.0, sigma=None, nu=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma)) self.nu = nu = tt.as_tensor_variable(floatX(nu)) self.mean = mu + nu - self.variance = (sigma**2) + (nu**2) + self.variance = (sigma ** 2) + (nu ** 2) - assert_negative_support(sigma, 'sigma', 'ExGaussian') - assert_negative_support(nu, 'nu', 'ExGaussian') + assert_negative_support(sigma, "sigma", "ExGaussian") + assert_negative_support(nu, "nu", "ExGaussian") def random(self, point=None, size=None): """ @@ -3242,16 +3311,18 @@ def random(self, point=None, size=None): ------- array """ - mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu], - point=point, size=size) + mu, sigma, nu = draw_values( + [self.mu, self.sigma, self.nu], point=point, size=size + ) def _random(mu, sigma, nu, size=None): - return (np.random.normal(mu, sigma, size=size) - + np.random.exponential(scale=nu, size=size)) + return np.random.normal(mu, sigma, size=size) + np.random.exponential( + scale=nu, size=size + ) - return generate_samples(_random, mu, sigma, nu, - dist_shape=self.shape, - size=size) + return generate_samples( + _random, mu, sigma, nu, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3273,7 +3344,9 @@ def logp(self, value): standardized_val = (value - mu) / sigma cdf_val = std_cdf(standardized_val - sigma / nu) - cdf_val_safe = tt.switch(tt.eq(cdf_val, 0), np.finfo(theano.config.floatX).eps, cdf_val) + cdf_val_safe = tt.switch( + tt.eq(cdf_val, 0), np.finfo(theano.config.floatX).eps, cdf_val + ) # This condition is suggested by exGAUS.R from gamlss lp = tt.switch( @@ -3293,11 +3366,10 @@ def _repr_latex_(self, name=None, dist=None): sigma = dist.sigma mu = dist.mu nu = dist.nu - name = r'\text{%s}' % name - return r'${} \sim \text{{ExGaussian}}(\mathit{{mu}}={},~\mathit{{sigma}}={},~\mathit{{nu}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(sigma), - get_variable_name(nu)) + name = r"\text{%s}" % name + return r"${} \sim \text{{ExGaussian}}(\mathit{{mu}}={},~\mathit{{sigma}}={},~\mathit{{nu}}={})$".format( + name, get_variable_name(mu), get_variable_name(sigma), get_variable_name(nu) + ) def logcdf(self, value): """ @@ -3322,21 +3394,29 @@ def logcdf(self, value): """ mu = self.mu sigma = self.sigma - sigma_2 = sigma**2 + sigma_2 = sigma ** 2 nu = self.nu - z = value - mu - sigma_2/nu + z = value - mu - sigma_2 / nu return tt.switch( tt.gt(nu, 0.05 * sigma), - tt.log(std_cdf((value - mu)/sigma) - - std_cdf(z/sigma) * tt.exp( - ((mu + (sigma_2/nu))**2 - - (mu**2) - - 2 * value * ((sigma_2)/nu))/(2 * sigma_2))), - normal_lcdf(mu, sigma, value)) + tt.log( + std_cdf((value - mu) / sigma) + - std_cdf(z / sigma) + * tt.exp( + ( + (mu + (sigma_2 / nu)) ** 2 + - (mu ** 2) + - 2 * value * ((sigma_2) / nu) + ) + / (2 * sigma_2) + ) + ), + normal_lcdf(mu, sigma, value), + ) class VonMises(Continuous): - R""" + r""" Univariate VonMises log-likelihood. The pdf of this distribution is @@ -3379,15 +3459,16 @@ class VonMises(Continuous): Concentration (\frac{1}{kappa} is analogous to \sigma^2). 
""" - def __init__(self, mu=0.0, kappa=None, transform='circular', - *args, **kwargs): - if transform == 'circular': + def __init__(self, mu=0.0, kappa=None, transform="circular", *args, **kwargs): + if transform == "circular": transform = transforms.Circular() super().__init__(transform=transform, *args, **kwargs) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( + floatX(mu) + ) self.kappa = kappa = tt.as_tensor_variable(floatX(kappa)) - assert_negative_support(kappa, 'kappa', 'VonMises') + assert_negative_support(kappa, "kappa", "VonMises") def random(self, point=None, size=None): """ @@ -3406,11 +3487,10 @@ def random(self, point=None, size=None): ------- array """ - mu, kappa = draw_values([self.mu, self.kappa], - point=point, size=size) - return generate_samples(stats.vonmises.rvs, loc=mu, kappa=kappa, - dist_shape=self.shape, - size=size) + mu, kappa = draw_values([self.mu, self.kappa], point=point, size=size) + return generate_samples( + stats.vonmises.rvs, loc=mu, kappa=kappa, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3428,23 +3508,26 @@ def logp(self, value): """ mu = self.mu kappa = self.kappa - return bound(kappa * tt.cos(mu - value) - (tt.log(2 * np.pi) + log_i0(kappa)), - kappa > 0, value >= -np.pi, value <= np.pi) + return bound( + kappa * tt.cos(mu - value) - (tt.log(2 * np.pi) + log_i0(kappa)), + kappa > 0, + value >= -np.pi, + value <= np.pi, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self kappa = dist.kappa mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{VonMises}}(\mathit{{mu}}={},~\mathit{{kappa}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(kappa)) - + name = r"\text{%s}" % name + return r"${} \sim \text{{VonMises}}(\mathit{{mu}}={},~\mathit{{kappa}}={})$".format( + name, get_variable_name(mu), get_variable_name(kappa) + ) class SkewNormal(Continuous): - R""" + r""" Univariate skew-normal log-likelihood. 
The pdf of this distribution is @@ -3501,16 +3584,12 @@ class SkewNormal(Continuous): """ - def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, - *args, **kwargs): + def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.mu = mu = tt.as_tensor_variable(floatX(mu)) @@ -3519,11 +3598,15 @@ def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) - self.mean = mu + self.sigma * (2 / np.pi)**0.5 * alpha / (1 + alpha**2)**0.5 - self.variance = self.sigma**2 * (1 - (2 * alpha**2) / ((1 + alpha**2) * np.pi)) + self.mean = ( + mu + self.sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5 + ) + self.variance = self.sigma ** 2 * ( + 1 - (2 * alpha ** 2) / ((1 + alpha ** 2) * np.pi) + ) - assert_negative_support(tau, 'tau', 'SkewNormal') - assert_negative_support(sigma, 'sigma', 'SkewNormal') + assert_negative_support(tau, "tau", "SkewNormal") + assert_negative_support(sigma, "sigma", "SkewNormal") def random(self, point=None, size=None): """ @@ -3543,11 +3626,16 @@ def random(self, point=None, size=None): array """ mu, tau, _, alpha = draw_values( - [self.mu, self.tau, self.sigma, self.alpha], point=point, size=size) - return generate_samples(stats.skewnorm.rvs, - a=alpha, loc=mu, scale=tau**-0.5, - dist_shape=self.shape, - size=size) + [self.mu, self.tau, self.sigma, self.alpha], point=point, size=size + ) + return generate_samples( + stats.skewnorm.rvs, + a=alpha, + loc=mu, + scale=tau ** -0.5, + dist_shape=self.shape, + size=size, + ) def logp(self, value): """ @@ -3568,11 +3656,11 @@ def logp(self, value): mu = self.mu alpha = self.alpha return bound( - tt.log(1 + - tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2))) - + (-tau * (value - mu)**2 - + tt.log(tau / np.pi / 2.)) / 2., - tau > 0, sigma > 0) + tt.log(1 + tt.erf(((value - mu) * tt.sqrt(tau) * alpha) / tt.sqrt(2))) + + (-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, + tau > 0, + sigma > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -3580,15 +3668,17 @@ def _repr_latex_(self, name=None, dist=None): sigma = dist.sigma mu = dist.mu alpha = dist.alpha - name = r'\text{%s}' % name - return r'${} \sim \text{{Skew-Normal}}(\mathit{{mu}}={},~\mathit{{sigma}}={},~\mathit{{alpha}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(sigma), - get_variable_name(alpha)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Skew-Normal}}(\mathit{{mu}}={},~\mathit{{sigma}}={},~\mathit{{alpha}}={})$".format( + name, + get_variable_name(mu), + get_variable_name(sigma), + get_variable_name(alpha), + ) class Triangular(BoundedContinuous): - R""" + r""" Continuous Triangular log-likelihood The pdf of this distribution is @@ -3641,8 +3731,7 @@ class Triangular(BoundedContinuous): Upper limit. 
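    Examples
    --------
    A minimal usage sketch (illustrative values; the mode ``c`` must lie
    between ``lower`` and ``upper``):

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            x = pm.Triangular('x', lower=0.0, c=2.0, upper=5.0)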
""" - def __init__(self, lower=0, upper=1, c=0.5, - *args, **kwargs): + def __init__(self, lower=0, upper=1, c=0.5, *args, **kwargs): self.median = self.mean = self.c = c = tt.as_tensor_variable(floatX(c)) self.lower = lower = tt.as_tensor_variable(floatX(lower)) self.upper = upper = tt.as_tensor_variable(floatX(upper)) @@ -3666,10 +3755,17 @@ def random(self, point=None, size=None): ------- array """ - c, lower, upper = draw_values([self.c, self.lower, self.upper], - point=point, size=size) - return generate_samples(self._random, c=c, lower=lower, upper=upper, - size=size, dist_shape=self.shape) + c, lower, upper = draw_values( + [self.c, self.lower, self.upper], point=point, size=size + ) + return generate_samples( + self._random, + c=c, + lower=lower, + upper=upper, + size=size, + dist_shape=self.shape, + ) def _random(self, c, lower, upper, size): """ Wrapper around stats.triang.rvs that converts Triangular's @@ -3679,10 +3775,7 @@ def _random(self, c, lower, upper, size): """ scale = upper - lower return stats.triang.rvs( - c=(c - lower) / scale, - loc=lower, - scale=scale, - size=size, + c=(c - lower) / scale, loc=lower, scale=scale, size=size ) def logp(self, value): @@ -3702,13 +3795,19 @@ def logp(self, value): c = self.c lower = self.lower upper = self.upper - return tt.switch(alltrue_elemwise([lower <= value, value < c]), - tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))), - tt.switch(tt.eq(value, c), - tt.log(2 / (upper - lower)), - tt.switch(alltrue_elemwise([c < value, value <= upper]), - tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))), - np.inf))) + return tt.switch( + alltrue_elemwise([lower <= value, value < c]), + tt.log(2 * (value - lower) / ((upper - lower) * (c - lower))), + tt.switch( + tt.eq(value, c), + tt.log(2 / (upper - lower)), + tt.switch( + alltrue_elemwise([c < value, value <= upper]), + tt.log(2 * (upper - value) / ((upper - lower) * (upper - c))), + np.inf, + ), + ), + ) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -3716,11 +3815,13 @@ def _repr_latex_(self, name=None, dist=None): lower = dist.lower upper = dist.upper c = dist.c - name = r'\text{%s}' % name - return r'${} \sim \text{{Triangular}}(\mathit{{c}}={},~\mathit{{lower}}={},~\mathit{{upper}}={})$'.format(name, - get_variable_name(c), - get_variable_name(lower), - get_variable_name(upper)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Triangular}}(\mathit{{c}}={},~\mathit{{lower}}={},~\mathit{{upper}}={})$".format( + name, + get_variable_name(c), + get_variable_name(lower), + get_variable_name(upper), + ) def logcdf(self, value): """ @@ -3749,14 +3850,14 @@ def logcdf(self, value): tt.switch( tt.lt(value, u), tt.log1p(-((u - value) ** 2) / ((u - l) * (u - c))), - 0 - ) - ) + 0, + ), + ), ) class Gumbel(Continuous): - R""" + r""" Univariate Gumbel log-likelihood The pdf of this distribution is @@ -3807,7 +3908,7 @@ def __init__(self, mu=0, beta=1.0, **kwargs): self.mu = tt.as_tensor_variable(floatX(mu)) self.beta = tt.as_tensor_variable(floatX(beta)) - assert_negative_support(beta, 'beta', 'Gumbel') + assert_negative_support(beta, "beta", "Gumbel") self.mean = self.mu + self.beta * np.euler_gamma self.median = self.mu - self.beta * tt.log(tt.log(2)) @@ -3834,9 +3935,9 @@ def random(self, point=None, size=None): array """ mu, sigma = draw_values([self.mu, self.beta], point=point, size=size) - return generate_samples(stats.gumbel_r.rvs, loc=mu, scale=sigma, - dist_shape=self.shape, - size=size) + return generate_samples( + stats.gumbel_r.rvs, 
loc=mu, scale=sigma, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -3860,10 +3961,10 @@ def _repr_latex_(self, name=None, dist=None): dist = self beta = dist.beta mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{Gumbel}}(\mathit{{mu}}={},~\mathit{{beta}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(beta)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Gumbel}}(\mathit{{mu}}={},~\mathit{{beta}}={})$".format( + name, get_variable_name(mu), get_variable_name(beta) + ) def logcdf(self, value): """ @@ -3883,11 +3984,11 @@ def logcdf(self, value): beta = self.beta mu = self.mu - return -tt.exp(-(value - mu)/beta) + return -tt.exp(-(value - mu) / beta) class Rice(PositiveContinuous): - R""" + r""" Rice distribution. .. math:: @@ -3949,31 +4050,49 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) nu, b, sigma = self.get_nu_b(nu, b, sigma) self.nu = nu = tt.as_tensor_variable(floatX(nu)) self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma)) self.b = b = tt.as_tensor_variable(floatX(b)) - self.mean = sigma * np.sqrt(np.pi / 2) * tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / (2 * sigma**2))) - * tt.i0(-(-nu**2 / (2 * sigma**2)) / 2) - (-nu**2 / (2 * sigma**2)) * tt.i1(-(-nu**2 / (2 * sigma**2)) / 2)) - self.variance = 2 * sigma**2 + nu**2 - (np.pi * sigma**2 / 2) * (tt.exp((-nu**2 / (2 * sigma**2)) / 2) * ((1 - (-nu**2 / ( - 2 * sigma**2))) * tt.i0(-(-nu**2 / (2 * sigma**2)) / 2) - (-nu**2 / (2 * sigma**2)) * tt.i1(-(-nu**2 / (2 * sigma**2)) / 2)))**2 + self.mean = ( + sigma + * np.sqrt(np.pi / 2) + * tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) + * ( + (1 - (-nu ** 2 / (2 * sigma ** 2))) + * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + - (-nu ** 2 / (2 * sigma ** 2)) + * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + ) + ) + self.variance = ( + 2 * sigma ** 2 + + nu ** 2 + - (np.pi * sigma ** 2 / 2) + * ( + tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) + * ( + (1 - (-nu ** 2 / (2 * sigma ** 2))) + * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + - (-nu ** 2 / (2 * sigma ** 2)) + * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + ) + ) + ** 2 + ) def get_nu_b(self, nu, b, sigma): if sigma is None: - sigma = 1. + sigma = 1.0 if nu is None and b is not None: nu = b * sigma return nu, b, sigma elif nu is not None and b is None: b = nu / sigma return nu, b, sigma - raise ValueError('Rice distribution must specify either nu' - ' or b.') + raise ValueError("Rice distribution must specify either nu" " or b.") def random(self, point=None, size=None): """ @@ -3992,10 +4111,10 @@ def random(self, point=None, size=None): ------- array """ - nu, sigma = draw_values([self.nu, self.sigma], - point=point, size=size) - return generate_samples(self._random, nu=nu, sigma=sigma, - dist_shape=self.shape, size=size) + nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size) + return generate_samples( + self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size + ) def _random(self, nu, sigma, size): """ Wrapper around stats.rice.rvs that converts Rice's @@ -4003,11 +4122,7 @@ def _random(self, nu, sigma, size): been broadcasted properly by generate_samples at this point and size is the scipy.rvs representation. 
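    For illustration, a sketch of the two accepted parameterizations
    handled by ``get_nu_b`` above (values are arbitrary); either the
    noncentrality ``nu`` or the shape ``b = nu / sigma`` may be given:

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            x = pm.Rice('x', nu=1.0, sigma=2.0)  # (nu, sigma) form
            y = pm.Rice('y', b=0.5, sigma=2.0)   # equivalent (b, sigma) form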
""" - return stats.rice.rvs( - b=nu / sigma, - scale=sigma, - size=size, - ) + return stats.rice.rvs(b=nu / sigma, scale=sigma, size=size) def logp(self, value): """ @@ -4027,15 +4142,16 @@ def logp(self, value): sigma = self.sigma b = self.b x = value / sigma - return bound(tt.log(x * tt.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma), - sigma >= 0, - nu >= 0, - value > 0, - ) + return bound( + tt.log(x * tt.exp((-(x - b) * (x - b)) / 2) * i0e(x * b) / sigma), + sigma >= 0, + nu >= 0, + value > 0, + ) class Logistic(Continuous): - R""" + r""" Logistic log-likelihood. The pdf of this distribution is @@ -4077,14 +4193,14 @@ class Logistic(Continuous): Scale (s > 0). """ - def __init__(self, mu=0., s=1., *args, **kwargs): + def __init__(self, mu=0.0, s=1.0, *args, **kwargs): super().__init__(*args, **kwargs) self.mu = tt.as_tensor_variable(floatX(mu)) self.s = tt.as_tensor_variable(floatX(s)) self.mean = self.mode = mu - self.variance = s**2 * np.pi**2 / 3. + self.variance = s ** 2 * np.pi ** 2 / 3.0 def logp(self, value): """ @@ -4104,7 +4220,9 @@ def logp(self, value): s = self.s return bound( - -(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), s > 0) + -(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), + s > 0, + ) def random(self, point=None, size=None): """ @@ -4126,20 +4244,18 @@ def random(self, point=None, size=None): mu, s = draw_values([self.mu, self.s], point=point, size=size) return generate_samples( - stats.logistic.rvs, - loc=mu, scale=s, - dist_shape=self.shape, - size=size) + stats.logistic.rvs, loc=mu, scale=s, dist_shape=self.shape, size=size + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self mu = dist.mu s = dist.s - name = r'\text{%s}' % name - return r'${} \sim \text{{Logistic}}(\mathit{{mu}}={},~\mathit{{s}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(s)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Logistic}}(\mathit{{mu}}={},~\mathit{{s}}={})$".format( + name, get_variable_name(mu), get_variable_name(s) + ) def logcdf(self, value): r""" @@ -4164,21 +4280,20 @@ def logcdf(self, value): """ mu = self.mu s = self.s - a = -(value - mu)/s - return - tt.switch( + a = -(value - mu) / s + return -tt.switch( tt.le(a, -37), tt.exp(a), tt.switch( tt.le(a, 18), tt.log1p(tt.exp(a)), - tt.switch( - tt.le(a, 33.3), - tt.exp(-a) + a, - a))) + tt.switch(tt.le(a, 33.3), tt.exp(-a) + a, a), + ), + ) class LogitNormal(UnitContinuous): - R""" + r""" Logit-Normal log-likelihood. 
The pdf of this distribution is @@ -4224,18 +4339,15 @@ class LogitNormal(UnitContinuous): def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs): if sd is not None: sigma = sd - warnings.warn( - "sd is deprecated, use sigma instead", - DeprecationWarning - ) + warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning) self.mu = mu = tt.as_tensor_variable(floatX(mu)) tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tau = tt.as_tensor_variable(tau) self.median = invlogit(mu) - assert_negative_support(sigma, 'sigma', 'LogitNormal') - assert_negative_support(tau, 'tau', 'LogitNormal') + assert_negative_support(sigma, "sigma", "LogitNormal") + assert_negative_support(tau, "tau", "LogitNormal") super().__init__(**kwargs) @@ -4257,9 +4369,13 @@ def random(self, point=None, size=None): array """ mu, _, sigma = draw_values( - [self.mu, self.tau, self.sigma], point=point, size=size) - return expit(generate_samples(stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, - size=size)) + [self.mu, self.tau, self.sigma], point=point, size=size + ) + return expit( + generate_samples( + stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size + ) + ) def logp(self, value): """ @@ -4278,23 +4394,28 @@ def logp(self, value): sigma = self.sigma mu = self.mu tau = self.tau - return bound(-0.5 * tau * (logit(value) - mu) ** 2 - + 0.5 * tt.log(tau / (2. * np.pi)) - - tt.log(value * (1 - value)), value > 0, value < 1, tau > 0) + return bound( + -0.5 * tau * (logit(value) - mu) ** 2 + + 0.5 * tt.log(tau / (2.0 * np.pi)) + - tt.log(value * (1 - value)), + value > 0, + value < 1, + tau > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self sigma = dist.sigma mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{LogitNormal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(sigma)) + name = r"\text{%s}" % name + return r"${} \sim \text{{LogitNormal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$".format( + name, get_variable_name(mu), get_variable_name(sigma) + ) class Interpolated(BoundedContinuous): - R""" + r""" Univariate probability distribution defined as a linear interpolation of probability density function evaluated on some lattice of points. @@ -4327,8 +4448,7 @@ def __init__(self, x_points, pdf_points, *args, **kwargs): super().__init__(lower=lower, upper=upper, *args, **kwargs) - interp = InterpolatedUnivariateSpline( - x_points, pdf_points, k=1, ext='zeros') + interp = InterpolatedUnivariateSpline(x_points, pdf_points, k=1, ext="zeros") Z = interp.integral(x_points[0], x_points[-1]) self.Z = tt.as_tensor_variable(Z) @@ -4352,9 +4472,10 @@ def _argcdf(self, p): np.where( np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), - (p - cdf[index]) / pdf[index] + (p - cdf[index]) / pdf[index], ), - (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope + (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) + / slope, ) def _random(self, size=None): @@ -4374,9 +4495,7 @@ def random(self, size=None): ------- array """ - return generate_samples(self._random, - dist_shape=self.shape, - size=size) + return generate_samples(self._random, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -4396,7 +4515,7 @@ def logp(self, value): class Moyal(Continuous): - R""" + r""" Moyal log-likelihood. 
The pdf of this distribution is @@ -4442,16 +4561,16 @@ class Moyal(Continuous): Scale parameter (sigma > 0). """ - def __init__(self, mu=0, sigma=1., *args, **kwargs): + def __init__(self, mu=0, sigma=1.0, *args, **kwargs): self.mu = tt.as_tensor_variable(floatX(mu)) self.sigma = tt.as_tensor_variable(floatX(sigma)) - assert_negative_support(sigma, 'sigma', 'Moyal') + assert_negative_support(sigma, "sigma", "Moyal") self.mean = self.mu + self.sigma * (np.euler_gamma + tt.log(2)) - self.median = self.mu - self.sigma * tt.log(2 * tt.erfcinv(1 / 2)**2) + self.median = self.mu - self.sigma * tt.log(2 * tt.erfcinv(1 / 2) ** 2) self.mode = self.mu - self.variance = (np.pi**2 / 2.0) * self.sigma**2 + self.variance = (np.pi ** 2 / 2.0) * self.sigma ** 2 super().__init__(*args, **kwargs) @@ -4473,9 +4592,9 @@ def random(self, point=None, size=None): array """ mu, sigma = draw_values([self.mu, self.sigma], point=point, size=size) - return generate_samples(stats.moyal.rvs, loc=mu, scale=sigma, - dist_shape=self.shape, - size=size) + return generate_samples( + stats.moyal.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size + ) def logp(self, value): """ @@ -4492,19 +4611,24 @@ def logp(self, value): TensorVariable """ scaled = (value - self.mu) / self.sigma - return bound((-(1 / 2) * (scaled + tt.exp(-scaled)) - - tt.log(self.sigma) - - (1 / 2) * tt.log(2 * np.pi)), self.sigma > 0) + return bound( + ( + -(1 / 2) * (scaled + tt.exp(-scaled)) + - tt.log(self.sigma) + - (1 / 2) * tt.log(2 * np.pi) + ), + self.sigma > 0, + ) def _repr_latex_(self, name=None, dist=None): if dist is None: dist = self sigma = dist.sigma mu = dist.mu - name = r'\text{%s}' % name - return r'${} \sim \text{{Moyal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name, - get_variable_name(mu), - get_variable_name(sigma)) + name = r"\text{%s}" % name + return r"${} \sim \text{{Moyal}}(\mathit{{mu}}={},~\mathit{{sigma}}={})$".format( + name, get_variable_name(mu), get_variable_name(sigma) + ) def logcdf(self, value): """ @@ -4522,4 +4646,4 @@ def logcdf(self, value): TensorVariable """ scaled = (value - self.mu) / self.sigma - return tt.log(tt.erfc(tt.exp(-scaled / 2) * (2**-0.5))) + return tt.log(tt.erfc(tt.exp(-scaled / 2) * (2 ** -0.5))) From 59b7e1156c928489724c9dd037fc74a81bbadabd Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 14 Aug 2020 19:45:58 +0200 Subject: [PATCH 3/6] Set Black vertical limit to 100 --- pymc3/distributions/continuous.py | 288 +++++++----------------------- pyproject.toml | 2 + 2 files changed, 68 insertions(+), 222 deletions(-) create mode 100644 pyproject.toml diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index d76935aa4b..0f72f2b154 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -125,9 +125,7 @@ def assert_negative_support(var, label, distname, value=-1e-6): return try: # Transformed distribution - support = np.isfinite( - var.transformed.distribution.dist.logp(value).tag.test_value - ) + support = np.isfinite(var.transformed.distribution.dist.logp(value).tag.test_value) except AttributeError: try: # Untransformed distribution @@ -256,11 +254,7 @@ def random(self, point=None, size=None): lower, upper = draw_values([self.lower, self.upper], point=point, size=size) return generate_samples( - stats.uniform.rvs, - loc=lower, - scale=upper - lower, - dist_shape=self.shape, - size=size, + stats.uniform.rvs, loc=lower, scale=upper - lower, dist_shape=self.shape, size=size ) def logp(self, value): @@ -376,9 +370,7 
@@ def logcdf(self, value): TensorVariable """ return tt.switch( - tt.eq(value, -np.inf), - -np.inf, - tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)), + tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) ) @@ -438,9 +430,7 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.switch( - tt.lt(value, np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, -np.inf) - ) + return tt.switch(tt.lt(value, np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, -np.inf)) class Normal(Continuous): @@ -514,9 +504,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, **kwargs): self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( - floatX(mu) - ) + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.variance = 1.0 / self.tau assert_negative_support(sigma, "sigma", "Normal") @@ -541,9 +529,7 @@ def random(self, point=None, size=None): ------- array """ - mu, tau, _ = draw_values( - [self.mu, self.tau, self.sigma], point=point, size=size - ) + mu, tau, _ = draw_values([self.mu, self.tau, self.sigma], point=point, size=size) return generate_samples( stats.norm.rvs, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size ) @@ -566,9 +552,7 @@ def logp(self, value): tau = self.tau mu = self.mu - return bound( - (-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, sigma > 0 - ) + return bound((-tau * (value - mu) ** 2 + tt.log(tau / np.pi / 2.0)) / 2.0, sigma > 0) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -688,12 +672,8 @@ def __init__( tau, sigma = get_tau_sigma(tau=tau, sigma=sigma) self.sigma = self.sd = tt.as_tensor_variable(sigma) self.tau = tt.as_tensor_variable(tau) - self.lower_check = ( - tt.as_tensor_variable(floatX(lower)) if lower is not None else lower - ) - self.upper_check = ( - tt.as_tensor_variable(floatX(upper)) if upper is not None else upper - ) + self.lower_check = tt.as_tensor_variable(floatX(lower)) if lower is not None else lower + self.upper_check = tt.as_tensor_variable(floatX(upper)) if upper is not None else upper self.lower = ( tt.as_tensor_variable(floatX(lower)) if lower is not None @@ -764,11 +744,7 @@ def _random(self, mu, sigma, lower, upper, size): the scipy.rvs representation. 
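    For illustration, a usage sketch with arbitrary bounds (one-sided
    truncation is also possible by leaving ``lower`` or ``upper`` unset):

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            x = pm.TruncatedNormal('x', mu=0.0, sigma=1.0,
                                   lower=-1.0, upper=1.0)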
""" return stats.truncnorm.rvs( - a=(lower - mu) / sigma, - b=(upper - mu) / sigma, - loc=mu, - scale=sigma, - size=size, + a=(lower - mu) / sigma, b=(upper - mu) / sigma, loc=mu, scale=sigma, size=size ) def logp(self, value): @@ -810,9 +786,7 @@ def _normalization(self): lsf_a = normal_lccdf(mu, sigma, self.lower) lsf_b = normal_lccdf(mu, sigma, self.upper) - return tt.switch( - self.lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a) - ) + return tt.switch(self.lower > 0, logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a)) if self.lower_check is not None: return normal_lccdf(mu, sigma, self.lower) @@ -1080,11 +1054,7 @@ def __init__(self, mu=None, lam=None, phi=None, alpha=0.0, *args, **kwargs): self.mean = self.mu + self.alpha self.mode = ( - self.mu - * ( - tt.sqrt(1.0 + (1.5 * self.mu / self.lam) ** 2) - - 1.5 * self.mu / self.lam - ) + self.mu * (tt.sqrt(1.0 + (1.5 * self.mu / self.lam) ** 2) - 1.5 * self.mu / self.lam) + self.alpha ) self.variance = (self.mu ** 3) / self.lam @@ -1141,12 +1111,8 @@ def random(self, point=None, size=None): ------- array """ - mu, lam, alpha = draw_values( - [self.mu, self.lam, self.alpha], point=point, size=size - ) - return generate_samples( - self._random, mu, lam, alpha, dist_shape=self.shape, size=size - ) + mu, lam, alpha = draw_values([self.mu, self.lam, self.alpha], point=point, size=size) + return generate_samples(self._random, mu, lam, alpha, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1186,10 +1152,7 @@ def _repr_latex_(self, name=None, dist=None): alpha = dist.alpha name = r"\text{%s}" % name return r"${} \sim \text{{Wald}}(\mathit{{mu}}={},~\mathit{{lam}}={},~\mathit{{alpha}}={})$".format( - name, - get_variable_name(mu), - get_variable_name(lam), - get_variable_name(alpha), + name, get_variable_name(mu), get_variable_name(lam), get_variable_name(alpha) ) def logcdf(self, value): @@ -1307,9 +1270,7 @@ class Beta(UnitContinuous): the binomial distribution. 
""" - def __init__( - self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs - ): + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd @@ -1320,9 +1281,7 @@ def __init__( self.mean = self.alpha / (self.alpha + self.beta) self.variance = ( - self.alpha - * self.beta - / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1)) + self.alpha * self.beta / ((self.alpha + self.beta) ** 2 * (self.alpha + self.beta + 1)) ) assert_negative_support(alpha, "alpha", "Beta") @@ -1361,9 +1320,7 @@ def random(self, point=None, size=None): array """ alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) - return generate_samples( - clipped_beta_rvs, alpha, beta, dist_shape=self.shape, size=size - ) + return generate_samples(clipped_beta_rvs, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1475,18 +1432,10 @@ def __init__(self, a, b, *args, **kwargs): self.a = a = tt.as_tensor_variable(floatX(a)) self.b = b = tt.as_tensor_variable(floatX(b)) - ln_mean = ( - tt.log(b) - + tt.gammaln(1 + 1 / a) - + tt.gammaln(b) - - tt.gammaln(1 + 1 / a + b) - ) + ln_mean = tt.log(b) + tt.gammaln(1 + 1 / a) + tt.gammaln(b) - tt.gammaln(1 + 1 / a + b) self.mean = tt.exp(ln_mean) ln_2nd_raw_moment = ( - tt.log(b) - + tt.gammaln(1 + 2 / a) - + tt.gammaln(b) - - tt.gammaln(1 + 2 / a + b) + tt.log(b) + tt.gammaln(1 + 2 / a) + tt.gammaln(b) - tt.gammaln(1 + 2 / a + b) ) self.variance = tt.exp(ln_2nd_raw_moment) - self.mean ** 2 @@ -1534,12 +1483,7 @@ def logp(self, value): a = self.a b = self.b - logp = ( - tt.log(a) - + tt.log(b) - + (a - 1) * tt.log(value) - + (b - 1) * tt.log(1 - value ** a) - ) + logp = tt.log(a) + tt.log(b) + (a - 1) * tt.log(value) + (b - 1) * tt.log(1 - value ** a) return bound(logp, value >= 0, value <= 1, a > 0, b > 0) @@ -1677,9 +1621,7 @@ def logcdf(self, value): return tt.switch( tt.le(value, 0.0), -np.inf, - tt.switch( - tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a)) - ), + tt.switch(tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a))), ) @@ -1728,9 +1670,7 @@ class Laplace(Continuous): def __init__(self, mu, b, *args, **kwargs): super().__init__(*args, **kwargs) self.b = b = tt.as_tensor_variable(floatX(b)) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( - floatX(mu) - ) + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.variance = 2 * self.b ** 2 @@ -1754,9 +1694,7 @@ def random(self, point=None, size=None): array """ mu, b = draw_values([self.mu, self.b], point=point, size=size) - return generate_samples( - np.random.laplace, mu, b, dist_shape=self.shape, size=size - ) + return generate_samples(np.random.laplace, mu, b, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -1808,9 +1746,7 @@ def logcdf(self, value): return tt.switch( tt.le(value, a), tt.log(0.5) + y, - tt.switch( - tt.gt(y, 1), tt.log1p(-0.5 * tt.exp(-y)), tt.log(1 - 0.5 * tt.exp(-y)) - ), + tt.switch(tt.gt(y, 1), tt.log1p(-0.5 * tt.exp(-y)), tt.log(1 - 0.5 * tt.exp(-y))), ) @@ -1891,9 +1827,7 @@ def __init__(self, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs): self.mean = tt.exp(self.mu + 1.0 / (2 * self.tau)) self.median = tt.exp(self.mu) self.mode = tt.exp(self.mu - 1.0 / self.tau) - self.variance = (tt.exp(1.0 / self.tau) - 1) * tt.exp( - 2 * self.mu + 1.0 / self.tau - ) + self.variance = (tt.exp(1.0 / self.tau) - 1) * 
tt.exp(2 * self.mu + 1.0 / self.tau) assert_negative_support(tau, "tau", "Lognormal") assert_negative_support(sigma, "sigma", "Lognormal") @@ -2060,9 +1994,7 @@ def __init__(self, nu, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs): self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma) self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu) - self.variance = tt.switch( - (nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf - ) + self.variance = tt.switch((nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf) assert_negative_support(lam, "lam (sigma)", "StudentT") assert_negative_support(nu, "nu", "StudentT") @@ -2205,9 +2137,7 @@ def __init__(self, alpha, m, transform="lowerbound", *args, **kwargs): self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf) self.median = m * 2.0 ** (1.0 / alpha) self.variance = tt.switch( - tt.gt(alpha, 2), - (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), - np.inf, + tt.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf ) assert_negative_support(alpha, "alpha", "Pareto") @@ -2239,9 +2169,7 @@ def random(self, point=None, size=None): array """ alpha, m = draw_values([self.alpha, self.m], point=point, size=size) - return generate_samples( - self._random, alpha, m, dist_shape=self.shape, size=size - ) + return generate_samples(self._random, alpha, m, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2295,9 +2223,7 @@ def logcdf(self, value): alpha = self.alpha arg = (m / value) ** alpha return tt.switch( - tt.lt(value, m), - -np.inf, - tt.switch(tt.le(arg, 1e-5), tt.log1p(-arg), tt.log(1 - arg)), + tt.lt(value, m), -np.inf, tt.switch(tt.le(arg, 1e-5), tt.log1p(-arg), tt.log(1 - arg)) ) @@ -2375,9 +2301,7 @@ def random(self, point=None, size=None): array """ alpha, beta = draw_values([self.alpha, self.beta], point=point, size=size) - return generate_samples( - self._random, alpha, beta, dist_shape=self.shape, size=size - ) + return generate_samples(self._random, alpha, beta, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -2396,8 +2320,7 @@ def logp(self, value): alpha = self.alpha beta = self.beta return bound( - -tt.log(np.pi) - tt.log(beta) - tt.log1p(((value - alpha) / beta) ** 2), - beta > 0, + -tt.log(np.pi) - tt.log(beta) - tt.log1p(((value - alpha) / beta) ** 2), beta > 0 ) def _repr_latex_(self, name=None, dist=None): @@ -2542,9 +2465,7 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.switch( - tt.le(value, 0), -np.inf, tt.log(2 * tt.arctan(value / self.beta) / np.pi) - ) + return tt.switch(tt.le(value, 0), -np.inf, tt.log(2 * tt.arctan(value / self.beta) / np.pi)) class Gamma(PositiveContinuous): @@ -2605,9 +2526,7 @@ class Gamma(PositiveContinuous): Alternative scale parameter (sigma > 0). 
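    Examples
    --------
    A minimal sketch of both parameterizations (illustrative values):

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            x = pm.Gamma('x', alpha=2.0, beta=0.5)  # shape/rate form
            y = pm.Gamma('y', mu=4.0, sigma=2.0)    # mean/sd form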
""" - def __init__( - self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs - ): + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): super().__init__(*args, **kwargs) if sd is not None: sigma = sd @@ -2677,10 +2596,7 @@ def logp(self, value): alpha = self.alpha beta = self.beta return bound( - -gammaln(alpha) - + logpow(beta, alpha) - - beta * value - + logpow(value, alpha - 1), + -gammaln(alpha) + logpow(beta, alpha) - beta * value + logpow(value, alpha - 1), value >= 0, alpha > 0, beta > 0, @@ -2703,9 +2619,7 @@ def logcdf(self, value): """ alpha = self.alpha beta = self.beta - return bound( - tt.log(tt.gammainc(alpha, beta * value)), value >= 0, alpha > 0, beta > 0 - ) + return bound(tt.log(tt.gammainc(alpha, beta * value)), value >= 0, alpha > 0, beta > 0) def _repr_latex_(self, name=None, dist=None): if dist is None: @@ -2766,9 +2680,7 @@ class InverseGamma(PositiveContinuous): Alternative scale parameter (sigma > 0). """ - def __init__( - self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs - ): + def __init__(self, alpha=None, beta=None, mu=None, sigma=None, sd=None, *args, **kwargs): super().__init__(*args, defaults=("mode",), **kwargs) if sd is not None: @@ -2853,10 +2765,7 @@ def logp(self, value): alpha = self.alpha beta = self.beta return bound( - logpow(beta, alpha) - - gammaln(alpha) - - beta / value - + logpow(value, -alpha - 1), + logpow(beta, alpha) - gammaln(alpha) - beta / value + logpow(value, -alpha - 1), value > 0, alpha > 0, beta > 0, @@ -3068,9 +2977,7 @@ def logcdf(self, value): return tt.switch( tt.le(value, 0.0), -np.inf, - tt.switch( - tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a)) - ), + tt.switch(tt.le(a, tt.log(2.0)), tt.log(-tt.expm1(-a)), tt.log1p(-tt.exp(-a))), ) @@ -3167,9 +3074,7 @@ def random(self, point=None, size=None): """ nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size) return np.abs( - generate_samples( - stats.t.rvs, nu, loc=0, scale=sigma, dist_shape=self.shape, size=size - ) + generate_samples(stats.t.rvs, nu, loc=0, scale=sigma, dist_shape=self.shape, size=size) ) def logp(self, value): @@ -3311,18 +3216,14 @@ def random(self, point=None, size=None): ------- array """ - mu, sigma, nu = draw_values( - [self.mu, self.sigma, self.nu], point=point, size=size - ) + mu, sigma, nu = draw_values([self.mu, self.sigma, self.nu], point=point, size=size) def _random(mu, sigma, nu, size=None): return np.random.normal(mu, sigma, size=size) + np.random.exponential( scale=nu, size=size ) - return generate_samples( - _random, mu, sigma, nu, dist_shape=self.shape, size=size - ) + return generate_samples(_random, mu, sigma, nu, dist_shape=self.shape, size=size) def logp(self, value): """ @@ -3344,17 +3245,12 @@ def logp(self, value): standardized_val = (value - mu) / sigma cdf_val = std_cdf(standardized_val - sigma / nu) - cdf_val_safe = tt.switch( - tt.eq(cdf_val, 0), np.finfo(theano.config.floatX).eps, cdf_val - ) + cdf_val_safe = tt.switch(tt.eq(cdf_val, 0), np.finfo(theano.config.floatX).eps, cdf_val) # This condition is suggested by exGAUS.R from gamlss lp = tt.switch( tt.gt(nu, 0.05 * sigma), - -tt.log(nu) - + (mu - value) / nu - + 0.5 * (sigma / nu) ** 2 - + logpow(cdf_val_safe, 1.0), + -tt.log(nu) + (mu - value) / nu + 0.5 * (sigma / nu) ** 2 + logpow(cdf_val_safe, 1.0), -tt.log(sigma * tt.sqrt(2 * np.pi)) - 0.5 * standardized_val ** 2, ) @@ -3403,11 +3299,7 @@ def logcdf(self, value): std_cdf((value - mu) / sigma) - std_cdf(z / 
sigma) * tt.exp( - ( - (mu + (sigma_2 / nu)) ** 2 - - (mu ** 2) - - 2 * value * ((sigma_2) / nu) - ) + ((mu + (sigma_2 / nu)) ** 2 - (mu ** 2) - 2 * value * ((sigma_2) / nu)) / (2 * sigma_2) ) ), @@ -3463,9 +3355,7 @@ def __init__(self, mu=0.0, kappa=None, transform="circular", *args, **kwargs): if transform == "circular": transform = transforms.Circular() super().__init__(transform=transform, *args, **kwargs) - self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable( - floatX(mu) - ) + self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(floatX(mu)) self.kappa = kappa = tt.as_tensor_variable(floatX(kappa)) assert_negative_support(kappa, "kappa", "VonMises") @@ -3598,12 +3488,8 @@ def __init__(self, mu=0.0, sigma=None, tau=None, alpha=1, sd=None, *args, **kwar self.alpha = alpha = tt.as_tensor_variable(floatX(alpha)) - self.mean = ( - mu + self.sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5 - ) - self.variance = self.sigma ** 2 * ( - 1 - (2 * alpha ** 2) / ((1 + alpha ** 2) * np.pi) - ) + self.mean = mu + self.sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5 + self.variance = self.sigma ** 2 * (1 - (2 * alpha ** 2) / ((1 + alpha ** 2) * np.pi)) assert_negative_support(tau, "tau", "SkewNormal") assert_negative_support(sigma, "sigma", "SkewNormal") @@ -3629,12 +3515,7 @@ def random(self, point=None, size=None): [self.mu, self.tau, self.sigma, self.alpha], point=point, size=size ) return generate_samples( - stats.skewnorm.rvs, - a=alpha, - loc=mu, - scale=tau ** -0.5, - dist_shape=self.shape, - size=size, + stats.skewnorm.rvs, a=alpha, loc=mu, scale=tau ** -0.5, dist_shape=self.shape, size=size ) def logp(self, value): @@ -3670,10 +3551,7 @@ def _repr_latex_(self, name=None, dist=None): alpha = dist.alpha name = r"\text{%s}" % name return r"${} \sim \text{{Skew-Normal}}(\mathit{{mu}}={},~\mathit{{sigma}}={},~\mathit{{alpha}}={})$".format( - name, - get_variable_name(mu), - get_variable_name(sigma), - get_variable_name(alpha), + name, get_variable_name(mu), get_variable_name(sigma), get_variable_name(alpha) ) @@ -3755,16 +3633,9 @@ def random(self, point=None, size=None): ------- array """ - c, lower, upper = draw_values( - [self.c, self.lower, self.upper], point=point, size=size - ) + c, lower, upper = draw_values([self.c, self.lower, self.upper], point=point, size=size) return generate_samples( - self._random, - c=c, - lower=lower, - upper=upper, - size=size, - dist_shape=self.shape, + self._random, c=c, lower=lower, upper=upper, size=size, dist_shape=self.shape ) def _random(self, c, lower, upper, size): @@ -3774,9 +3645,7 @@ def _random(self, c, lower, upper, size): the scipy.rvs representation. 
""" scale = upper - lower - return stats.triang.rvs( - c=(c - lower) / scale, loc=lower, scale=scale, size=size - ) + return stats.triang.rvs(c=(c - lower) / scale, loc=lower, scale=scale, size=size) def logp(self, value): """ @@ -3817,10 +3686,7 @@ def _repr_latex_(self, name=None, dist=None): c = dist.c name = r"\text{%s}" % name return r"${} \sim \text{{Triangular}}(\mathit{{c}}={},~\mathit{{lower}}={},~\mathit{{upper}}={})$".format( - name, - get_variable_name(c), - get_variable_name(lower), - get_variable_name(upper), + name, get_variable_name(c), get_variable_name(lower), get_variable_name(upper) ) def logcdf(self, value): @@ -3847,11 +3713,7 @@ def logcdf(self, value): tt.switch( tt.le(value, c), tt.log(((value - l) ** 2) / ((u - l) * (c - l))), - tt.switch( - tt.lt(value, u), - tt.log1p(-((u - value) ** 2) / ((u - l) * (u - c))), - 0, - ), + tt.switch(tt.lt(value, u), tt.log1p(-((u - value) ** 2) / ((u - l) * (u - c))), 0), ), ) @@ -4061,10 +3923,8 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs): * np.sqrt(np.pi / 2) * tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) * ( - (1 - (-nu ** 2 / (2 * sigma ** 2))) - * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) - - (-nu ** 2 / (2 * sigma ** 2)) - * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + (1 - (-nu ** 2 / (2 * sigma ** 2))) * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + - (-nu ** 2 / (2 * sigma ** 2)) * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) ) ) self.variance = ( @@ -4074,10 +3934,8 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs): * ( tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) * ( - (1 - (-nu ** 2 / (2 * sigma ** 2))) - * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) - - (-nu ** 2 / (2 * sigma ** 2)) - * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + (1 - (-nu ** 2 / (2 * sigma ** 2))) * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + - (-nu ** 2 / (2 * sigma ** 2)) * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) ) ) ** 2 @@ -4112,9 +3970,7 @@ def random(self, point=None, size=None): array """ nu, sigma = draw_values([self.nu, self.sigma], point=point, size=size) - return generate_samples( - self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size - ) + return generate_samples(self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size) def _random(self, nu, sigma, size): """ Wrapper around stats.rice.rvs that converts Rice's @@ -4219,10 +4075,7 @@ def logp(self, value): mu = self.mu s = self.s - return bound( - -(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), - s > 0, - ) + return bound(-(value - mu) / s - tt.log(s) - 2 * tt.log1p(tt.exp(-(value - mu) / s)), s > 0) def random(self, point=None, size=None): """ @@ -4285,9 +4138,7 @@ def logcdf(self, value): tt.le(a, -37), tt.exp(a), tt.switch( - tt.le(a, 18), - tt.log1p(tt.exp(a)), - tt.switch(tt.le(a, 33.3), tt.exp(-a) + a, a), + tt.le(a, 18), tt.log1p(tt.exp(a)), tt.switch(tt.le(a, 33.3), tt.exp(-a) + a, a) ), ) @@ -4368,13 +4219,9 @@ def random(self, point=None, size=None): ------- array """ - mu, _, sigma = draw_values( - [self.mu, self.tau, self.sigma], point=point, size=size - ) + mu, _, sigma = draw_values([self.mu, self.tau, self.sigma], point=point, size=size) return expit( - generate_samples( - stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size - ) + generate_samples(stats.norm.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size) ) def logp(self, value): @@ -4470,12 +4317,9 @@ def _argcdf(self, p): return x[index] + np.where( np.abs(slope) <= 1e-8, np.where( - 
np.abs(pdf[index]) <= 1e-8, - np.zeros(index.shape), - (p - cdf[index]) / pdf[index], + np.abs(pdf[index]) <= 1e-8, np.zeros(index.shape), (p - cdf[index]) / pdf[index] ), - (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) - / slope, + (-pdf[index] + np.sqrt(pdf[index] ** 2 + 2 * slope * (p - cdf[index]))) / slope, ) def _random(self, size=None): diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..aa4949aa1c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.black] +line-length = 100 From 395f2e700ae1f22d75608ee00922bd5688fec7e8 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Fri, 28 Aug 2020 20:50:19 +0200 Subject: [PATCH 4/6] Added reformatting with Junpeng's comments --- pymc3/distributions/continuous.py | 62 +++++++++++++++---------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index 0f72f2b154..835fa23fe1 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -369,9 +369,7 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.switch( - tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) - ) + return tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) class HalfFlat(PositiveContinuous): @@ -1131,14 +1129,15 @@ def logp(self, value): mu = self.mu lam = self.lam alpha = self.alpha + centered_value = value - alpha # value *must* be iid. Otherwise this is wrong. return bound( logpow(lam / (2.0 * np.pi), 0.5) - - logpow(value - alpha, 1.5) - - (0.5 * lam / (value - alpha) * ((value - alpha - mu) / mu) ** 2), + - logpow(centered_value, 1.5) + - (0.5 * lam / centered_value * ((centered_value - mu) / mu) ** 2), # XXX these two are redundant. Please, check. 
value > 0, - value - alpha > 0, + centered_value > 0, mu > 0, lam > 0, alpha >= 0, @@ -1182,28 +1181,25 @@ def logcdf(self, value): a = normal_lcdf(0, 1, (q - 1.0) / r) b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r) + + left_limit = ( + tt.lt(value, 0) + | (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) + | (tt.lt(value, mu) & tt.eq(lam, 0)) + ) + right_limit = ( + tt.eq(value, np.inf) + | (tt.eq(lam, 0) & tt.gt(value, mu)) + | (tt.gt(value, 0) & tt.eq(lam, np.inf)) + ) + degenerate_dist = (tt.lt(mu, np.inf) & tt.eq(mu, value) & tt.eq(lam, 0)) | ( + tt.eq(value, 0) & tt.eq(lam, np.inf) + ) + return tt.switch( - ( - # Left limit - tt.lt(value, 0) - | (tt.eq(value, 0) & tt.gt(mu, 0) & tt.lt(lam, np.inf)) - | (tt.lt(value, mu) & tt.eq(lam, 0)) - ), + left_limit, -np.inf, - tt.switch( - ( - # Right limit - tt.eq(value, np.inf) - | (tt.eq(lam, 0) & tt.gt(value, mu)) - | (tt.gt(value, 0) & tt.eq(lam, np.inf)) - | - # Degenerate distribution - (tt.lt(mu, np.inf) & tt.eq(mu, value) & tt.eq(lam, 0)) - | (tt.eq(value, 0) & tt.eq(lam, np.inf)) - ), - 0, - a + tt.log1p(tt.exp(b - a)), - ), + tt.switch((right_limit | degenerate_dist), 0, a + tt.log1p(tt.exp(b - a))), ) @@ -3918,13 +3914,15 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs): self.nu = nu = tt.as_tensor_variable(floatX(nu)) self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma)) self.b = b = tt.as_tensor_variable(floatX(b)) + + nu_sigma_ratio = -nu ** 2 / (2 * sigma ** 2) self.mean = ( sigma * np.sqrt(np.pi / 2) - * tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) + * tt.exp(nu_sigma_ratio / 2) * ( - (1 - (-nu ** 2 / (2 * sigma ** 2))) * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) - - (-nu ** 2 / (2 * sigma ** 2)) * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2) + - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2) ) ) self.variance = ( @@ -3932,10 +3930,10 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs): + nu ** 2 - (np.pi * sigma ** 2 / 2) * ( - tt.exp((-nu ** 2 / (2 * sigma ** 2)) / 2) + tt.exp(nu_sigma_ratio / 2) * ( - (1 - (-nu ** 2 / (2 * sigma ** 2))) * tt.i0(-(-nu ** 2 / (2 * sigma ** 2)) / 2) - - (-nu ** 2 / (2 * sigma ** 2)) * tt.i1(-(-nu ** 2 / (2 * sigma ** 2)) / 2) + (1 - nu_sigma_ratio) * tt.i0(-nu_sigma_ratio / 2) + - nu_sigma_ratio * tt.i1(-nu_sigma_ratio / 2) ) ) ** 2 From fb3e21ad2c7da78ece11ee346a4dcd8ac985f091 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Sun, 30 Aug 2020 22:56:56 +0200 Subject: [PATCH 5/6] Reinstated outer tt.switch in logcdf of Flat distribution --- pymc3/distributions/continuous.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index 835fa23fe1..3c2e544fc8 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -369,7 +369,9 @@ def logcdf(self, value): ------- TensorVariable """ - return tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) + return tt.switch( + tt.eq(value, -np.inf), -np.inf, tt.switch(tt.eq(value, np.inf), 0, tt.log(0.5)) + ) class HalfFlat(PositiveContinuous): From d7e2ce7eb4e5b53a76fbc3f4ff9e38c90551bdb9 Mon Sep 17 00:00:00 2001 From: AlexAndorra Date: Mon, 7 Sep 2020 13:16:23 +0200 Subject: [PATCH 6/6] Deleted import of get_variable_name --- pymc3/distributions/continuous.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py index 
03afa34474..490df563cd 100644 --- a/pymc3/distributions/continuous.py +++ b/pymc3/distributions/continuous.py @@ -44,7 +44,6 @@ from ..math import invlogit, logit, logdiffexp from pymc3.theanof import floatX -from pymc3.util import get_variable_name from scipy import stats from scipy.special import expit from scipy.interpolate import InterpolatedUnivariateSpline @@ -1322,7 +1321,7 @@ def logcdf(self, value): def _distr_parameters_for_repr(self): return ["alpha", "beta"] - + class Kumaraswamy(UnitContinuous): r"""