Formatted Next 15 Files #4160

Merged: 1 commit, Oct 8, 2020
16 changes: 4 additions & 12 deletions pymc3/distributions/discrete.py
@@ -644,25 +644,17 @@ def get_mu_alpha(self, mu=None, alpha=None, p=None, n=None):
             if n is not None:
                 alpha = n
             else:
-                raise ValueError(
-                    "Incompatible parametrization. Must specify either alpha or n."
-                )
+                raise ValueError("Incompatible parametrization. Must specify either alpha or n.")
         elif n is not None:
-            raise ValueError(
-                "Incompatible parametrization. Can't specify both alpha and n."
-            )
+            raise ValueError("Incompatible parametrization. Can't specify both alpha and n.")
 
         if mu is None:
             if p is not None:
                 mu = alpha * (1 - p) / p
             else:
-                raise ValueError(
-                    "Incompatible parametrization. Must specify either mu or p."
-                )
+                raise ValueError("Incompatible parametrization. Must specify either mu or p.")
         elif p is not None:
-            raise ValueError(
-                "Incompatible parametrization. Can't specify both mu and p."
-            )
+            raise ValueError("Incompatible parametrization. Can't specify both mu and p.")
 
         return mu, alpha

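The hunk above only reformats the helper that reconciles NegativeBinomial's two parametrizations: exactly one of (alpha, n) and exactly one of (mu, p) may be given, with mu = alpha * (1 - p) / p converting between them. A minimal standalone sketch of that logic (a hypothetical reduction for illustration, not the pymc3 API itself):

def get_mu_alpha(mu=None, alpha=None, p=None, n=None):
    # Exactly one of (alpha, n) and one of (mu, p) may be supplied.
    if alpha is None:
        if n is None:
            raise ValueError("Incompatible parametrization. Must specify either alpha or n.")
        alpha = n
    elif n is not None:
        raise ValueError("Incompatible parametrization. Can't specify both alpha and n.")
    if mu is None:
        if p is None:
            raise ValueError("Incompatible parametrization. Must specify either mu or p.")
        mu = alpha * (1 - p) / p  # negative-binomial mean in terms of (n, p)
    elif p is not None:
        raise ValueError("Incompatible parametrization. Can't specify both mu and p.")
    return mu, alpha

print(get_mu_alpha(n=10, p=0.5))  # -> (10.0, 10)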
10 changes: 2 additions & 8 deletions pymc3/distributions/distribution.py
@@ -179,21 +179,15 @@ def _str_repr(self, name=None, dist=None, formatting="plain"):

         if formatting == "latex":
             param_string = ",~".join(
-                [
-                    fr"\mathit{{{name}}}={value}"
-                    for name, value in zip(param_names, param_values)
-                ]
+                [fr"\mathit{{{name}}}={value}" for name, value in zip(param_names, param_values)]
             )
             return r"$\text{{{var_name}}} \sim \text{{{distr_name}}}({params})$".format(
                 var_name=name, distr_name=dist._distr_name_for_repr(), params=param_string
             )
         else:
             # 'plain' is default option
             param_string = ", ".join(
-                [
-                    f"{name}={value}"
-                    for name, value in zip(param_names, param_values)
-                ]
+                [f"{name}={value}" for name, value in zip(param_names, param_values)]
             )
             return "{var_name} ~ {distr_name}({params})".format(
                 var_name=name, distr_name=dist._distr_name_for_repr(), params=param_string
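The two branches only differ in how the name=value pairs are joined and wrapped. A sketch of the resulting strings with assumed inputs (the variable name and parameters here are illustrative, not from the PR):

param_names = ["mu", "sigma"]
param_values = ["0.0", "1.0"]

plain = "{var_name} ~ {distr_name}({params})".format(
    var_name="x",
    distr_name="Normal",
    params=", ".join([f"{name}={value}" for name, value in zip(param_names, param_values)]),
)
print(plain)  # x ~ Normal(mu=0.0, sigma=1.0)

latex = r"$\text{{{var_name}}} \sim \text{{{distr_name}}}({params})$".format(
    var_name="x",
    distr_name="Normal",
    params=",~".join([fr"\mathit{{{name}}}={value}" for name, value in zip(param_names, param_values)]),
)
print(latex)  # $\text{x} \sim \text{Normal}(\mathit{mu}=0.0,~\mathit{sigma}=1.0)$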
9 changes: 5 additions & 4 deletions pymc3/distributions/transforms.py
@@ -440,8 +440,9 @@ class StickBreaking(Transform):

     def __init__(self, eps=None):
         if eps is not None:
-            warnings.warn("The argument `eps` is deprecated and will not be used.",
-                          DeprecationWarning)
+            warnings.warn(
+                "The argument `eps` is deprecated and will not be used.", DeprecationWarning
+            )
 
     def forward(self, x_):
         x = x_.T
@@ -471,9 +472,9 @@ def jacobian_det(self, y_):
         y = y_.T
         Km1 = y.shape[0] + 1
         sy = tt.sum(y, 0, keepdims=True)
-        r = tt.concatenate([y+sy, tt.zeros(sy.shape)])
+        r = tt.concatenate([y + sy, tt.zeros(sy.shape)])
         sr = logsumexp(r, 0, keepdims=True)
-        d = tt.log(Km1) + (Km1*sy) - (Km1*sr)
+        d = tt.log(Km1) + (Km1 * sy) - (Km1 * sr)
         return tt.sum(d, 0).T


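As a sanity check that only whitespace changes here, jacobian_det translates line for line into NumPy (a sketch assuming an input of shape (n_points, K - 1); scipy's logsumexp stands in for the theano one):

import numpy as np
from scipy.special import logsumexp

def stickbreaking_jacobian_det(y_):
    y = y_.T                      # (K-1, n_points), as in the diff
    Km1 = y.shape[0] + 1
    sy = np.sum(y, 0, keepdims=True)
    r = np.concatenate([y + sy, np.zeros(sy.shape)])
    sr = logsumexp(r, 0, keepdims=True)
    d = np.log(Km1) + (Km1 * sy) - (Km1 * sr)
    return np.sum(d, 0).T         # one log-det value per point

print(stickbreaking_jacobian_det(np.array([[0.1, -0.2, 0.3]])))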
61 changes: 22 additions & 39 deletions pymc3/gp/cov.py
@@ -87,10 +87,13 @@ def full(self, X, Xs):

     def _slice(self, X, Xs):
         if self.input_dim != X.shape[-1]:
-            warnings.warn(f"Only {self.input_dim} column(s) out of {X.shape[-1]} are"
-                          " being used to compute the covariance function. If this"
-                          " is not intended, increase 'input_dim' parameter to"
-                          " the number of columns to use. Ignore otherwise.", UserWarning)
+            warnings.warn(
+                f"Only {self.input_dim} column(s) out of {X.shape[-1]} are"
+                " being used to compute the covariance function. If this"
+                " is not intended, increase 'input_dim' parameter to"
+                " the number of columns to use. Ignore otherwise.",
+                UserWarning,
+            )
         X = tt.as_tensor_variable(X[:, self.active_dims])
         if Xs is not None:
             Xs = tt.as_tensor_variable(Xs[:, self.active_dims])
@@ -109,9 +112,9 @@ def __rmul__(self, other):
         return self.__mul__(other)
 
     def __pow__(self, other):
-        if(
-            isinstance(other, theano.compile.SharedVariable) and
-            other.get_value().squeeze().shape == ()
+        if (
+            isinstance(other, theano.compile.SharedVariable)
+            and other.get_value().squeeze().shape == ()
         ):
             other = tt.squeeze(other)
             return Exponentiated(self, other)
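The rewrapped condition special-cases 0-d theano shared variables so they can serve as scalar exponents. A hypothetical usage (the kernel choice and values are illustrative, not from the PR):

import theano
import pymc3 as pm

cov = pm.gp.cov.ExpQuad(1, ls=0.5)  # any pymc3 covariance function
power = theano.shared(2.0)          # 0-d SharedVariable; squeezed to a scalar
cov_sq = cov ** power               # dispatches to Exponentiated(cov, power)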
@@ -123,7 +126,6 @@ def __pow__(self, other):

         raise ValueError("A covariance function can only be exponentiated by a scalar value")
 
-
     def __array_wrap__(self, result):
         """
         Required to allow radd/rmul by numpy arrays.
@@ -132,7 +134,9 @@ def __array_wrap__(self, result):
         if len(result.shape) <= 1:
             result = result.reshape(1, 1)
         elif len(result.shape) > 2:
-            raise ValueError(f"cannot combine a covariance function with array of shape {result.shape}")
+            raise ValueError(
+                f"cannot combine a covariance function with array of shape {result.shape}"
+            )
         r, c = result.shape
         A = np.zeros((r, c))
         for i in range(r):
@@ -149,11 +153,7 @@ def __array_wrap__(self, result):
 class Combination(Covariance):
     def __init__(self, factor_list):
         input_dim = max(
-            [
-                factor.input_dim
-                for factor in factor_list
-                if isinstance(factor, Covariance)
-            ]
+            [factor.input_dim for factor in factor_list if isinstance(factor, Covariance)]
         )
         super().__init__(input_dim=input_dim)
         self.factor_list = []
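The collapsed comprehension picks the largest input_dim among the factors that are Covariance instances, skipping plain scalars in the factor list. With assumed factors (illustrative values, not from the PR):

import pymc3 as pm

factors = [pm.gp.cov.ExpQuad(2, ls=1.0), 3.0, pm.gp.cov.Matern52(1, ls=1.0)]
input_dim = max(
    [f.input_dim for f in factors if isinstance(f, pm.gp.cov.Covariance)]
)
print(input_dim)  # 2; the scalar 3.0 is ignored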
@@ -205,10 +205,7 @@ class Exponentiated(Covariance):
     def __init__(self, kernel, power):
         self.kernel = kernel
         self.power = power
-        super().__init__(
-            input_dim=self.kernel.input_dim,
-            active_dims=self.kernel.active_dims
-        )
+        super().__init__(input_dim=self.kernel.input_dim, active_dims=self.kernel.active_dims)
 
     def __call__(self, X, Xs=None, diag=False):
         return self.kernel(X, Xs, diag=diag) ** self.power
@@ -247,9 +244,7 @@ def _split(self, X, Xs):

     def __call__(self, X, Xs=None, diag=False):
         X_split, Xs_split = self._split(X, Xs)
-        covs = [
-            cov(x, xs, diag) for cov, x, xs in zip(self.factor_list, X_split, Xs_split)
-        ]
+        covs = [cov(x, xs, diag) for cov, x, xs in zip(self.factor_list, X_split, Xs_split)]
         return reduce(mul, covs)


@@ -431,9 +426,7 @@ class Matern52(Stationary):
     def full(self, X, Xs=None):
         X, Xs = self._slice(X, Xs)
         r = self.euclidean_dist(X, Xs)
-        return (1.0 + np.sqrt(5.0) * r + 5.0 / 3.0 * tt.square(r)) * tt.exp(
-            -1.0 * np.sqrt(5.0) * r
-        )
+        return (1.0 + np.sqrt(5.0) * r + 5.0 / 3.0 * tt.square(r)) * tt.exp(-1.0 * np.sqrt(5.0) * r)
 
 
 class Matern32(Stationary):
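For reference, the one-liner produced here is the Matérn 5/2 kernel at the lengthscale-scaled Euclidean distance r,

    k(r) = \left(1 + \sqrt{5}\,r + \tfrac{5}{3}\,r^{2}\right)\exp\left(-\sqrt{5}\,r\right),

so only the line wrapping changes, not the math.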
@@ -605,14 +598,10 @@ def __init__(self, input_dim, lengthscale_func, args=None, active_dims=None):
         super().__init__(input_dim, active_dims)
         if active_dims is not None:
             if len(active_dims) > 1:
-                raise NotImplementedError(
-                    ("Higher dimensional inputs ", "are untested")
-                )
+                raise NotImplementedError(("Higher dimensional inputs ", "are untested"))
         else:
             if input_dim != 1:
-                raise NotImplementedError(
-                    ("Higher dimensional inputs ", "are untested")
-                )
+                raise NotImplementedError(("Higher dimensional inputs ", "are untested"))
         if not callable(lengthscale_func):
             raise TypeError("lengthscale_func must be callable")
         self.lfunc = handle_args(lengthscale_func, args)
@@ -642,9 +631,7 @@ def full(self, X, Xs=None):
         r2 = self.square_dist(X, Xs)
         rx2 = tt.reshape(tt.square(rx), (-1, 1))
         rz2 = tt.reshape(tt.square(rz), (1, -1))
-        return tt.sqrt((2.0 * tt.outer(rx, rz)) / (rx2 + rz2)) * tt.exp(
-            -1.0 * r2 / (rx2 + rz2)
-        )
+        return tt.sqrt((2.0 * tt.outer(rx, rz)) / (rx2 + rz2)) * tt.exp(-1.0 * r2 / (rx2 + rz2))
 
     def diag(self, X):
         return tt.alloc(1.0, X.shape[0])
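Here rx and rz are the pointwise lengthscales lfunc(x) and lfunc(z), so the collapsed return line is the Gibbs non-stationary kernel

    k(x, z) = \sqrt{\frac{2\,\ell(x)\,\ell(z)}{\ell(x)^{2} + \ell(z)^{2}}}\,\exp\left(-\frac{(x - z)^{2}}{\ell(x)^{2} + \ell(z)^{2}}\right)

written on one line instead of three.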
@@ -734,19 +721,15 @@ def __init__(self, input_dim, W=None, kappa=None, B=None, active_dims=None):
             raise ValueError("Coregion requires exactly one dimension to be active")
         make_B = W is not None or kappa is not None
         if make_B and B is not None:
-            raise ValueError(
-                "Exactly one of (W, kappa) and B must be provided to Coregion"
-            )
+            raise ValueError("Exactly one of (W, kappa) and B must be provided to Coregion")
         if make_B:
             self.W = tt.as_tensor_variable(W)
             self.kappa = tt.as_tensor_variable(kappa)
             self.B = tt.dot(self.W, self.W.T) + tt.diag(self.kappa)
         elif B is not None:
             self.B = tt.as_tensor_variable(B)
         else:
-            raise ValueError(
-                "Exactly one of (W, kappa) and B must be provided to Coregion"
-            )
+            raise ValueError("Exactly one of (W, kappa) and B must be provided to Coregion")
 
     def full(self, X, Xs=None):
         X, Xs = self._slice(X, Xs)
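When (W, kappa) are given, the branch above assembles the coregionalization matrix B = W Wᵀ + diag(kappa). A small NumPy sketch with made-up values:

import numpy as np

W = np.array([[1.0], [0.5]])   # (n_outputs, rank)
kappa = np.array([0.1, 0.2])
B = W @ W.T + np.diag(kappa)
print(B)
# [[1.1  0.5 ]
#  [0.5  0.45]]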