
Renamed model.recompute_initial_point to model.compute_initial_point #5360

Merged · 4 commits · Jan 17, 2022
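This PR renames Model.recompute_initial_point to Model.compute_initial_point across the codebase and updates the deprecated test_point and initial_point aliases (and their warning messages) to reference the new name. A minimal sketch of the rename from a user's perspective; the toy model below is illustrative, not part of this PR:

import pymc as pm

with pm.Model() as model:
    pm.Normal("x", mu=0.0, sigma=1.0)

# Old spelling (before this PR): model.recompute_initial_point()
# New spelling:
initial_point = model.compute_initial_point()
# -> a dict mapping value-variable names to starting values, e.g. {"x": array(0.)}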
Binary file added: .coverage (binary contents not shown)
4 changes: 2 additions & 2 deletions pymc/backends/base.py
@@ -70,9 +70,9 @@ def __init__(self, name, model=None, vars=None, test_point=None):
         # Get variable shapes. Most backends will need this
         # information.
         if test_point is None:
-            test_point = model.recompute_initial_point()
+            test_point = model.compute_initial_point()
         else:
-            test_point_ = model.recompute_initial_point().copy()
+            test_point_ = model.compute_initial_point().copy()
             test_point_.update(test_point)
             test_point = test_point_
         var_values = list(zip(self.varnames, self.fn(test_point)))
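The base.py hunk overlays a user-supplied (possibly partial) test_point onto the model's freshly computed initial point, so missing variables fall back to their defaults. The same merge pattern in isolation, as a sketch; the two-variable model and the partial point are made up for illustration:

import pymc as pm

with pm.Model() as model:
    pm.Normal("x", 0.0, 1.0)
    pm.Normal("y", 0.0, 1.0)

user_point = {"x": 2.0}  # hypothetical partial point
full_point = model.compute_initial_point().copy()
full_point.update(user_point)  # caller's values win on overlap
# full_point now holds the user's "x" and the model's default "y"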
2 changes: 1 addition & 1 deletion pymc/bart/pgbart.py
@@ -58,7 +58,7 @@ class PGBART(ArrayStepShared):
     def __init__(self, vars=None, num_particles=40, max_stages=100, batch="auto", model=None):
         _log.warning("BART is experimental. Use with caution.")
         model = modelcontext(model)
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()
         value_bart = inputvars(vars)[0]
         self.bart = model.values_to_rvs[value_bart].owner.op
22 changes: 11 additions & 11 deletions pymc/model.py
@@ -639,7 +639,7 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs):

         input_vars = {i for i in graph_inputs(costs) if not isinstance(i, Constant)}
         extra_vars = [self.rvs_to_values.get(var, var) for var in self.free_RVs]
-        ip = self.recompute_initial_point(0)
+        ip = self.compute_initial_point(0)
         extra_vars_and_values = {
             var: ip[var.name] for var in extra_vars if var in input_vars and var not in grad_vars
         }
@@ -987,24 +987,24 @@ def cont_vars(self):

     @property
     def test_point(self) -> Dict[str, np.ndarray]:
-        """Deprecated alias for `Model.recompute_initial_point(seed=None)`."""
+        """Deprecated alias for `Model.compute_initial_point(seed=None)`."""
         warnings.warn(
-            "`Model.test_point` has been deprecated. Use `Model.recompute_initial_point(seed=None)`.",
+            "`Model.test_point` has been deprecated. Use `Model.compute_initial_point(seed=None)`.",
             FutureWarning,
         )
-        return self.recompute_initial_point()
+        return self.compute_initial_point()

     @property
     def initial_point(self) -> Dict[str, np.ndarray]:
-        """Deprecated alias for `Model.recompute_initial_point(seed=None)`."""
+        """Deprecated alias for `Model.compute_initial_point(seed=None)`."""
         warnings.warn(
-            "`Model.initial_point` has been deprecated. Use `Model.recompute_initial_point(seed=None)`.",
+            "`Model.initial_point` has been deprecated. Use `Model.compute_initial_point(seed=None)`.",
             FutureWarning,
         )
-        return self.recompute_initial_point()
+        return self.compute_initial_point()

-    def recompute_initial_point(self, seed=None) -> Dict[str, np.ndarray]:
-        """Recomputes the initial point of the model.
+    def compute_initial_point(self, seed=None) -> Dict[str, np.ndarray]:
+        """Computes the initial point of the model.

         Returns
         -------
@@ -1540,7 +1540,7 @@ def profile(self, outs, *, n=1000, point=None, profile=True, **kwargs):
         kwargs.setdefault("on_unused_input", "ignore")
         f = self.compile_fn(outs, inputs=self.value_vars, point_fn=False, profile=profile, **kwargs)
         if point is None:
-            point = self.recompute_initial_point()
+            point = self.compute_initial_point()

         for _ in range(n):
             f(**point)
@@ -1699,7 +1699,7 @@ def point_logps(self, point=None, round_vals=2):
         Pandas Series
         """
         if point is None:
-            point = self.recompute_initial_point()
+            point = self.compute_initial_point()

         factors = self.basic_RVs + self.potentials
         return Series(
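The model.py hunks keep the deprecated test_point and initial_point properties but point them, and their warning text, at the renamed method. A short sketch of the post-merge behavior; the one-variable model is illustrative:

import warnings

import numpy as np
import pymc as pm

with pm.Model() as model:
    pm.Normal("x", 0.0, 1.0)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ip = model.test_point  # deprecated alias, still returns the initial point
assert any(issubclass(w.category, FutureWarning) for w in caught)
np.testing.assert_array_equal(ip["x"], model.compute_initial_point()["x"])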
2 changes: 1 addition & 1 deletion pymc/sampling_jax.py
@@ -170,7 +170,7 @@ def sample_numpyro_nuts(
     print("Compiling...", file=sys.stdout)

     rv_names = [rv.name for rv in model.value_vars]
-    initial_point = model.recompute_initial_point()
+    initial_point = model.compute_initial_point()
     init_state = [initial_point[rv_name] for rv_name in rv_names]
     init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)
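sample_numpyro_nuts tiles the single initial point across chains with jax.tree_map. The batching step in isolation, as a sketch with made-up per-variable shapes:

import jax
import numpy as np

chains = 4
init_state = [np.zeros(3), np.ones((2, 2))]  # made-up per-variable arrays

# Prepend a chain axis and repeat each leaf `chains` times.
init_state_batched = jax.tree_map(
    lambda x: np.repeat(x[None, ...], chains, axis=0), init_state
)
assert init_state_batched[0].shape == (4, 3)
assert init_state_batched[1].shape == (4, 2, 2)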
2 changes: 1 addition & 1 deletion pymc/smc/smc.py
@@ -191,7 +191,7 @@ def _initialize_kernel(self):

         """
         # Create dictionary that stores original variables shape and size
-        initial_point = self.model.recompute_initial_point(seed=self.rng.integers(2 ** 30))
+        initial_point = self.model.compute_initial_point(seed=self.rng.integers(2 ** 30))
         for v in self.variables:
             self.var_info[v.name] = (initial_point[v.name].shape, initial_point[v.name].size)
         # Create particles bijection map
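The SMC kernel draws the seed from its own Generator so that kernel initialization is reproducible. The same seeding pattern in user code, as a sketch; initval="prior" is an assumption made here so that the seed actually influences the result:

import numpy as np
import pymc as pm

with pm.Model() as model:
    # With initval="prior" the starting value is drawn at random,
    # so the seed passed below determines the outcome.
    pm.Normal("x", 0.0, 1.0, initval="prior")

rng = np.random.default_rng(42)
seed = int(rng.integers(2**30))
ip1 = model.compute_initial_point(seed=seed)
ip2 = model.compute_initial_point(seed=seed)
np.testing.assert_array_equal(ip1["x"], ip2["x"])  # same seed, same point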
2 changes: 1 addition & 1 deletion pymc/step_methods/hmc/base_hmc.py
@@ -102,7 +102,7 @@ def __init__(
         # size.
         # XXX: If the dimensions of these terms change, the step size
         # dimension-scaling should change as well, no?
-        test_point = self._model.recompute_initial_point()
+        test_point = self._model.compute_initial_point()

         nuts_vars = [test_point[v.name] for v in vars]
         size = sum(v.size for v in nuts_vars)
10 changes: 5 additions & 5 deletions pymc/step_methods/metropolis.py
@@ -161,7 +161,7 @@ def __init__(
         """

         model = pm.modelcontext(model)
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()

         if vars is None:
             vars = model.value_vars
@@ -425,7 +425,7 @@ def __init__(self, vars, order="random", transit_p=0.8, model=None):
         # transition probabilities
         self.transit_p = transit_p

-        initial_point = model.recompute_initial_point()
+        initial_point = model.compute_initial_point()
         vars = [model.rvs_to_values.get(var, var) for var in vars]
         self.dim = sum(initial_point[v.name].size for v in vars)

@@ -510,7 +510,7 @@ def __init__(self, vars, proposal="uniform", order="random", model=None):
         vars = [model.rvs_to_values.get(var, var) for var in vars]
         vars = pm.inputvars(vars)

-        initial_point = model.recompute_initial_point()
+        initial_point = model.compute_initial_point()

         dimcats = []
         # The above variable is a list of pairs (aggregate dimension, number
@@ -712,7 +712,7 @@ def __init__(
     ):

         model = pm.modelcontext(model)
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()
         initial_values_size = sum(initial_values[n.name].size for n in model.value_vars)

         if vars is None:
@@ -863,7 +863,7 @@ def __init__(
         **kwargs
     ):
         model = pm.modelcontext(model)
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()
         initial_values_size = sum(initial_values[n.name].size for n in model.value_vars)

         if vars is None:
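Several of the step methods above size their proposal machinery from the total number of scalar entries in the initial point. That computation in isolation, as a sketch with a made-up model:

import pymc as pm

with pm.Model() as model:
    pm.Normal("x", 0.0, 1.0, size=3)
    pm.Normal("y", 0.0, 1.0, size=(2, 2))

initial_values = model.compute_initial_point()
total_size = sum(initial_values[v.name].size for v in model.value_vars)
assert total_size == 7  # 3 + 4 scalar entries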
6 changes: 3 additions & 3 deletions pymc/step_methods/mlda.py
@@ -52,7 +52,7 @@ def __init__(self, *args, **kwargs):
         and some extra code specific for MLDA.
         """
         model = pm.modelcontext(kwargs.get("model", None))
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()

         # flag to that variance reduction is activated - forces MetropolisMLDA
         # to store quantities of interest in a register if True
@@ -114,7 +114,7 @@ def __init__(self, *args, **kwargs):
         self.tuning_end_trigger = False

         model = pm.modelcontext(kwargs.get("model", None))
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()

         # flag to that variance reduction is activated - forces DEMetropolisZMLDA
         # to store quantities of interest in a register if True
@@ -381,7 +381,7 @@ def __init__(

         # assign internal state
         model = pm.modelcontext(model)
-        initial_values = model.recompute_initial_point()
+        initial_values = model.compute_initial_point()
         self.model = model
         self.coarse_models = coarse_models
         self.model_below = self.coarse_models[-1]
30 changes: 15 additions & 15 deletions pymc/tests/models.py
@@ -32,7 +32,7 @@ def simple_model():
     with Model() as model:
         Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))

-    return model.recompute_initial_point(), model, (mu, tau ** -0.5)
+    return model.compute_initial_point(), model, (mu, tau ** -0.5)


 def simple_categorical():
@@ -43,7 +43,7 @@ def simple_categorical():

     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
-    return model.recompute_initial_point(), model, (mu, var)
+    return model.compute_initial_point(), model, (mu, var)


 def multidimensional_model():
@@ -52,7 +52,7 @@ def multidimensional_model():
     with Model() as model:
         Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))

-    return model.recompute_initial_point(), model, (mu, tau ** -0.5)
+    return model.compute_initial_point(), model, (mu, tau ** -0.5)


 def simple_arbitrary_det():
@@ -67,7 +67,7 @@ def arbitrary_det(value):
         b = arbitrary_det(a)
         Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))

-    return model.recompute_initial_point(), model
+    return model.compute_initial_point(), model


 def simple_init():
@@ -84,7 +84,7 @@ def simple_2model():
         x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Bernoulli("y", p)
-    return model.recompute_initial_point(), model
+    return model.compute_initial_point(), model


 def simple_2model_continuous():
@@ -94,7 +94,7 @@ def simple_2model_continuous():
         x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Beta("y", alpha=1, beta=1, size=2)
-    return model.recompute_initial_point(), model
+    return model.compute_initial_point(), model


 def mv_simple():
@@ -110,7 +110,7 @@ def mv_simple():
         )
     H = tau
     C = np.linalg.inv(H)
-    return model.recompute_initial_point(), model, (mu, C)
+    return model.compute_initial_point(), model, (mu, C)


 def mv_simple_coarse():
@@ -126,7 +126,7 @@ def mv_simple_coarse():
         )
     H = tau
     C = np.linalg.inv(H)
-    return model.recompute_initial_point(), model, (mu, C)
+    return model.compute_initial_point(), model, (mu, C)


 def mv_simple_very_coarse():
@@ -142,7 +142,7 @@ def mv_simple_very_coarse():
         )
     H = tau
     C = np.linalg.inv(H)
-    return model.recompute_initial_point(), model, (mu, C)
+    return model.compute_initial_point(), model, (mu, C)


 def mv_simple_discrete():
@@ -160,7 +160,7 @@ def mv_simple_discrete():
             else:
                 C[i, j] = -n * p[i] * p[j]

-    return model.recompute_initial_point(), model, (mu, C)
+    return model.compute_initial_point(), model, (mu, C)


 def mv_prior_simple():
@@ -186,27 +186,27 @@ def mv_prior_simple():
         x = pm.Flat("x", size=n)
         x_obs = pm.MvNormal("x_obs", observed=obs, mu=x, cov=noise * np.eye(n))

-    return model.recompute_initial_point(), model, (K, L, mu_post, std_post, noise)
+    return model.compute_initial_point(), model, (K, L, mu_post, std_post, noise)


 def non_normal(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 3, size=n, transform=None)
-    return model.recompute_initial_point(), model, (np.tile([0.5], n), None)
+    return model.compute_initial_point(), model, (np.tile([0.5], n), None)


 def exponential_beta(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 1, size=n, transform=None)
         pm.Exponential("y", 1, size=n, transform=None)
-    return model.recompute_initial_point(), model, None
+    return model.compute_initial_point(), model, None


 def beta_bernoulli(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 1, size=n, transform=None)
         pm.Bernoulli("y", 0.5)
-    return model.recompute_initial_point(), model, None
+    return model.compute_initial_point(), model, None


 def simple_normal(bounded_prior=False):
@@ -222,4 +222,4 @@ def simple_normal(bounded_prior=False):
         mu_i = pm.Flat("mu_i")
         pm.Normal("X_obs", mu=mu_i, sigma=sd, observed=x0)

-    return model.recompute_initial_point(), model, None
+    return model.compute_initial_point(), model, None
2 changes: 1 addition & 1 deletion pymc/tests/test_aesaraf.py
@@ -108,7 +108,7 @@ def test_make_shared_replacements(self):

         # Replace test1 with a shared variable, keep test 2 the same
         replacement = pm.make_shared_replacements(
-            test_model.recompute_initial_point(), [test_model.test2], test_model
+            test_model.compute_initial_point(), [test_model.test2], test_model
         )
         assert (
             test_model.test1.broadcastable
2 changes: 1 addition & 1 deletion pymc/tests/test_data_container.py
@@ -34,7 +34,7 @@ def test_deterministic(self):
         with pm.Model() as model:
             X = pm.MutableData("X", data_values)
             pm.Normal("y", 0, 1, observed=X)
-        model.compile_logp()(model.recompute_initial_point())
+        model.compile_logp()(model.compute_initial_point())

     def test_sample(self):
         x = np.random.normal(size=100)
2 changes: 1 addition & 1 deletion pymc/tests/test_distributions.py
@@ -2670,7 +2670,7 @@ def test_bound_shapes(self):
             bound_shaped = Bound("boundedshaped", dist, lower=1, upper=10, shape=(3, 5))
             bound_dims = Bound("boundeddims", dist, lower=1, upper=10, dims="sample")

-        initial_point = m.recompute_initial_point()
+        initial_point = m.compute_initial_point()
         dist_size = initial_point["boundedsized_interval__"].shape
         dist_shape = initial_point["boundedshaped_interval__"].shape
         dist_dims = initial_point["boundeddims_interval__"].shape
2 changes: 1 addition & 1 deletion pymc/tests/test_distributions_random.py
@@ -1810,7 +1810,7 @@ def test_mixture_random_shape():
     assert rand3.shape == (100, 20)

     with m:
-        ppc = pm.sample_posterior_predictive([m.recompute_initial_point()], samples=200)
+        ppc = pm.sample_posterior_predictive([m.compute_initial_point()], samples=200)
     assert ppc["like0"].shape == (200, 20)
     assert ppc["like1"].shape == (200, 20)
     assert ppc["like2"].shape == (200, 20)
4 changes: 2 additions & 2 deletions pymc/tests/test_distributions_timeseries.py
@@ -42,7 +42,7 @@ def test_AR():
         rho = Normal("rho", 0.0, 1.0)
         y1 = AR1("y1", rho, 1.0, observed=data)
         y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
-        initial_point = t.recompute_initial_point()
+        initial_point = t.compute_initial_point()
         np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

     # AR1 + constant
@@ -78,7 +78,7 @@ def test_AR_nd():
             AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])

     np.testing.assert_allclose(
-        t0.logp(t0.recompute_initial_point()), t1.logp(t1.recompute_initial_point())
+        t0.logp(t0.compute_initial_point()), t1.logp(t1.compute_initial_point())
     )