
Formatted 15 Files of test Dir #4149


Merged: 2 commits, Oct 5, 2020
pymc3/distributions/continuous.py (12 changes: 5 additions & 7 deletions)

@@ -134,9 +134,7 @@ def assert_negative_support(var, label, distname, value=-1e-6):
             support = False
 
     if np.any(support):
-        msg = "The variable specified for {} has negative support for {}, ".format(
-            label, distname
-        )
+        msg = f"The variable specified for {label} has negative support for {distname}, "
         msg += "likely making it unsuitable for this parameter."
         warnings.warn(msg)
 
@@ -712,7 +710,7 @@ def random(self, point=None, size=None):
         )
 
     def _random(self, mu, sigma, lower, upper, size):
-        """ Wrapper around stats.truncnorm.rvs that converts TruncatedNormal's
+        """Wrapper around stats.truncnorm.rvs that converts TruncatedNormal's
         parametrization to scipy.truncnorm. All parameter arrays should have
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
@@ -3447,7 +3445,7 @@ def random(self, point=None, size=None):
         )
 
     def _random(self, c, lower, upper, size):
-        """ Wrapper around stats.triang.rvs that converts Triangular's
+        """Wrapper around stats.triang.rvs that converts Triangular's
         parametrization to scipy.triang. All parameter arrays should have
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
@@ -3706,7 +3704,7 @@ def __init__(self, nu=None, sigma=None, b=None, sd=None, *args, **kwargs):
         self.sigma = self.sd = sigma = tt.as_tensor_variable(floatX(sigma))
         self.b = b = tt.as_tensor_variable(floatX(b))
 
-        nu_sigma_ratio = -nu ** 2 / (2 * sigma ** 2)
+        nu_sigma_ratio = -(nu ** 2) / (2 * sigma ** 2)
         self.mean = (
             sigma
             * np.sqrt(np.pi / 2)
@@ -3762,7 +3760,7 @@ def random(self, point=None, size=None):
         return generate_samples(self._random, nu=nu, sigma=sigma, dist_shape=self.shape, size=size)
 
     def _random(self, nu, sigma, size):
-        """ Wrapper around stats.rice.rvs that converts Rice's
+        """Wrapper around stats.rice.rvs that converts Rice's
         parametrization to scipy.rice. All parameter arrays should have
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
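Note on the changes above: the stray space after an opening triple quote is removed, a .format() call becomes an f-string, and -nu ** 2 is wrapped as -(nu ** 2); all three are behavior-preserving. In particular, unary minus binds more loosely than ** in Python, so the new parentheses only make the existing parse explicit. A quick standalone check (illustrative, not part of the PR):

# Unary minus binds more loosely than **, so -nu ** 2 already means
# -(nu ** 2); the added parentheses do not change the result.
nu = 3.0
assert -nu ** 2 == -(nu ** 2) == -9.0
assert (-nu) ** 2 == 9.0  # parenthesizing the base is the distinct case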
pymc3/tests/backend_fixtures.py (218 changes: 108 additions & 110 deletions)

Large diffs are not rendered by default.

pymc3/tests/conftest.py (16 changes: 7 additions & 9 deletions)

@@ -20,31 +20,29 @@
 
 @pytest.fixture(scope="function", autouse=True)
 def theano_config():
-    config = theano.configparser.change_flags(compute_test_value='raise')
+    config = theano.configparser.change_flags(compute_test_value="raise")
     with config:
         yield
 
 
-@pytest.fixture(scope='function', autouse=True)
+@pytest.fixture(scope="function", autouse=True)
 def exception_verbosity():
-    config = theano.configparser.change_flags(
-        exception_verbosity='high')
+    config = theano.configparser.change_flags(exception_verbosity="high")
     with config:
         yield
 
 
-@pytest.fixture(scope='function', autouse=False)
+@pytest.fixture(scope="function", autouse=False)
 def strict_float32():
-    if theano.config.floatX == 'float32':
-        config = theano.configparser.change_flags(
-            warn_float64='raise')
+    if theano.config.floatX == "float32":
+        config = theano.configparser.change_flags(warn_float64="raise")
         with config:
             yield
     else:
         yield
 
 
-@pytest.fixture(scope='function', autouse=False)
+@pytest.fixture(scope="function", autouse=False)
 def seeded_test():
     # TODO: use this instead of SeededTest
     np.random.seed(42)
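For readers unfamiliar with the fixture pattern above: the autouse fixtures wrap every test automatically, while strict_float32 and seeded_test apply only to tests that request them by name. A hypothetical test (not from this PR) would opt in like this:

# Hypothetical test module: pytest injects the fixtures named in the
# signature. Under a float32 build, strict_float32 makes any stray
# float64 operation raise instead of passing silently.
import numpy as np


def test_float32_mean(strict_float32, seeded_test):
    x = np.random.rand(10).astype("float32")
    assert 0.0 <= float(x.mean()) <= 1.0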
pymc3/tests/helpers.py (4 changes: 2 additions & 2 deletions)

@@ -65,7 +65,7 @@ def matches(self, **kwargs):
 
 class Matcher:
 
-    _partial_matches = ('msg', 'message')
+    _partial_matches = ("msg", "message")
 
     def matches(self, d, **kwargs):
         """
@@ -91,7 +91,7 @@ def match_value(self, k, dv, v):
         if isinstance(v, type(dv)):
             result = False
         elif not isinstance(dv, str) or k not in self._partial_matches:
-            result = (v == dv)
+            result = v == dv
         else:
             result = dv.find(v) >= 0
         return result
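Context for the diff above: Matcher backs the log-capture assertions in the test suite. For keys listed in _partial_matches the stored string only needs to contain the supplied value, while other keys are meant to compare exactly. A sketch of the intended use, with a made-up record dict:

# Illustrative only: compare a captured log record against expected
# fields, where the "msg" key is allowed to match on a substring.
record = {"levelno": 30, "msg": "negative support for sigma detected"}
expected = {"levelno": 30, "msg": "negative support"}

if Matcher().matches(record, **expected):
    print("record matches")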
pymc3/tests/models.py (67 changes: 34 additions & 33 deletions)

@@ -26,16 +26,16 @@ def simple_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal('x', mu, tau=tau, shape=2, testval=tt.ones(2) * .1)
+        Normal("x", mu, tau=tau, shape=2, testval=tt.ones(2) * 0.1)
 
-    return model.test_point, model, (mu, tau ** -.5)
+    return model.test_point, model, (mu, tau ** -0.5)
 
 
 def simple_categorical():
     p = floatX_array([0.1, 0.2, 0.3, 0.4])
     v = floatX_array([0.0, 1.0, 2.0, 3.0])
     with Model() as model:
-        Categorical('x', p, shape=3, testval=[1, 2, 3])
+        Categorical("x", p, shape=3, testval=[1, 2, 3])
 
     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
@@ -46,9 +46,9 @@ def multidimensional_model():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        Normal('x', mu, tau=tau, shape=(3, 2), testval=.1 * tt.ones((3, 2)))
+        Normal("x", mu, tau=tau, shape=(3, 2), testval=0.1 * tt.ones((3, 2)))
 
-    return model.test_point, model, (mu, tau ** -.5)
+    return model.test_point, model, (mu, tau ** -0.5)
 
 
 def simple_arbitrary_det():
@@ -59,50 +59,52 @@ def arbitrary_det(value):
         return value
 
     with Model() as model:
-        a = Normal('a')
+        a = Normal("a")
         b = arbitrary_det(a)
-        Normal('obs', mu=b.astype('float64'), observed=floatX_array([1, 3, 5]))
+        Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))
 
     return model.test_point, model
 
 
 def simple_init():
     start, model, moments = simple_model()
-    step = Metropolis(model.vars, np.diag([1.]), model=model)
+    step = Metropolis(model.vars, np.diag([1.0]), model=model)
     return model, start, step, moments
 
 
 def simple_2model():
     mu = -2.1
     tau = 1.3
-    p = .4
+    p = 0.4
     with Model() as model:
-        x = pm.Normal('x', mu, tau=tau, testval=.1)
-        pm.Deterministic('logx', tt.log(x))
-        pm.Bernoulli('y', p)
+        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        pm.Deterministic("logx", tt.log(x))
+        pm.Bernoulli("y", p)
     return model.test_point, model
 
 
 def simple_2model_continuous():
     mu = -2.1
     tau = 1.3
     with Model() as model:
-        x = pm.Normal('x', mu, tau=tau, testval=.1)
-        pm.Deterministic('logx', tt.log(x))
-        pm.Beta('y', alpha=1, beta=1, shape=2)
+        x = pm.Normal("x", mu, tau=tau, testval=0.1)
+        pm.Deterministic("logx", tt.log(x))
+        pm.Beta("y", alpha=1, beta=1, shape=2)
     return model.test_point, model
 
 
 def mv_simple():
-    mu = floatX_array([-.1, .5, 1.1])
-    p = floatX_array([
-        [2., 0, 0],
-        [.05, .1, 0],
-        [1., -0.05, 5.5]])
+    mu = floatX_array([-0.1, 0.5, 1.1])
+    p = floatX_array([[2.0, 0, 0], [0.05, 0.1, 0], [1.0, -0.05, 5.5]])
     tau = np.dot(p, p.T)
     with pm.Model() as model:
-        pm.MvNormal('x', tt.constant(mu), tau=tt.constant(tau),
-                    shape=3, testval=floatX_array([.1, 1., .8]))
+        pm.MvNormal(
+            "x",
+            tt.constant(mu),
+            tau=tt.constant(tau),
+            shape=3,
+            testval=floatX_array([0.1, 1.0, 0.8]),
+        )
     H = tau
     C = np.linalg.inv(H)
     return model.test_point, model, (mu, C)
@@ -145,9 +147,9 @@ def mv_simple_very_coarse():
 def mv_simple_discrete():
     d = 2
     n = 5
-    p = floatX_array([.15, .85])
+    p = floatX_array([0.15, 0.85])
     with pm.Model() as model:
-        pm.Multinomial('x', n, tt.constant(p), shape=d, testval=np.array([1, 4]))
+        pm.Multinomial("x", n, tt.constant(p), shape=d, testval=np.array([1, 4]))
     mu = n * p
     # covariance matrix
     C = np.zeros((d, d))
@@ -180,30 +182,29 @@ def mv_prior_simple():
     std_post = (K - np.dot(v.T, v)).diagonal() ** 0.5
 
     with pm.Model() as model:
-        x = pm.Flat('x', shape=n)
-        x_obs = pm.MvNormal('x_obs', observed=obs, mu=x,
-                            cov=noise * np.eye(n), shape=n)
+        x = pm.Flat("x", shape=n)
+        x_obs = pm.MvNormal("x_obs", observed=obs, mu=x, cov=noise * np.eye(n), shape=n)
 
     return model.test_point, model, (K, L, mu_post, std_post, noise)
 
 
 def non_normal(n=2):
     with pm.Model() as model:
-        pm.Beta('x', 3, 3, shape=n, transform=None)
-    return model.test_point, model, (np.tile([.5], n), None)
+        pm.Beta("x", 3, 3, shape=n, transform=None)
+    return model.test_point, model, (np.tile([0.5], n), None)
 
 
 def exponential_beta(n=2):
     with pm.Model() as model:
-        pm.Beta('x', 3, 1, shape=n, transform=None)
-        pm.Exponential('y', 1, shape=n, transform=None)
+        pm.Beta("x", 3, 1, shape=n, transform=None)
+        pm.Exponential("y", 1, shape=n, transform=None)
     return model.test_point, model, None
 
 
 def beta_bernoulli(n=2):
     with pm.Model() as model:
-        pm.Beta('x', 3, 1, shape=n, transform=None)
-        pm.Bernoulli('y', 0.5)
+        pm.Beta("x", 3, 1, shape=n, transform=None)
+        pm.Bernoulli("y", 0.5)
     return model.test_point, model, None
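These model factories feed the step-method and backend tests: each returns a starting test point, a Model, and, where meaningful, analytic moments to validate estimates against. A hypothetical consumer (sampler settings are illustrative, not from this PR):

import numpy as np
import pymc3 as pm

from pymc3.tests.models import simple_model

# Sketch: sample the prior of the simple model and compare the sample
# mean against the analytic (mu, sd) that the factory returns.
start, model, (mu, sd) = simple_model()
with model:
    trace = pm.sample(draws=1000, chains=1, start=start, progressbar=False)

assert abs(trace["x"].mean() - mu) < 0.5  # loose tolerance for a sketch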