Improve sampling coverage #4270
@@ -13,14 +13,9 @@
 # limitations under the License.

 from itertools import combinations
-import packaging
 from typing import Tuple
 import numpy as np

-try:
-    import unittest.mock as mock  # py3
-except ImportError:
-    from unittest import mock
+import unittest.mock as mock

 import numpy.testing as npt
 import arviz as az
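The dropped try/except reflects that pymc3 now runs on Python 3 only, where unittest.mock is part of the standard library and can be imported directly. A minimal sketch of the direct import in use, with a purely illustrative patch target that is not part of the PR:

import platform
import unittest.mock as mock

def report_python_version():
    # stand-in for any code that reads a patchable attribute
    return platform.python_version()

# mock.patch swaps the attribute out for the duration of the with-block only
with mock.patch("platform.python_version", return_value="3.99.0"):
    assert report_python_version() == "3.99.0"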
@@ -180,13 +175,9 @@ def test_trace_report_bart(self):
         assert var_imp[0] > var_imp[1:].sum()
         npt.assert_almost_equal(var_imp.sum(), 1)

-    def test_return_inferencedata(self):
+    def test_return_inferencedata(self, monkeypatch):
         with self.model:
             kwargs = dict(draws=100, tune=50, cores=1, chains=2, step=pm.Metropolis())
-            v = packaging.version.parse(pm.__version__)
-            if v.major > 3 or v.minor >= 10:
-                with pytest.warns(FutureWarning, match="pass return_inferencedata"):
-                    result = pm.sample(**kwargs)

             # trace with tuning
             with pytest.warns(UserWarning, match="will be included"):
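The packaging-based version check is dropped in favour of pytest's built-in monkeypatch fixture, which the test now receives as an argument; the fixture can force a specific pymc3.__version__ and undoes the change automatically when the test finishes. A rough sketch of the fixture pattern, assuming a throwaway fake_pkg module instead of pymc3 so the snippet stays self-contained:

import sys
import types
import warnings

import pytest

# throwaway module standing in for the package under test (illustrative only)
fake_pkg = types.ModuleType("fake_pkg")
fake_pkg.__version__ = "3.9"
sys.modules["fake_pkg"] = fake_pkg

def sample_stub():
    # warns only for the "new" version, mimicking the behaviour the test targets
    if sys.modules["fake_pkg"].__version__ == "3.10":
        warnings.warn("pass return_inferencedata explicitly", FutureWarning)

def test_future_warning(monkeypatch):
    # monkeypatch.setattr reverts the patched attribute when the test ends
    monkeypatch.setattr("fake_pkg.__version__", "3.10")
    with pytest.warns(FutureWarning, match="return_inferencedata"):
        sample_stub()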
@@ -203,12 +194,25 @@ def test_return_inferencedata(self):
             assert result.posterior.sizes["chain"] == 2
             assert len(result._groups_warmup) > 0

-            # inferencedata without tuning
-            result = pm.sample(**kwargs, return_inferencedata=True, discard_tuned_samples=True)
+            # inferencedata without tuning, with idata_kwargs
+            prior = pm.sample_prior_predictive()
+            result = pm.sample(
+                **kwargs,
+                return_inferencedata=True,
+                discard_tuned_samples=True,
+                idata_kwargs={"prior": prior},
+                random_seed=-1
+            )
+            assert "prior" in result
             assert isinstance(result, az.InferenceData)
             assert result.posterior.sizes["draw"] == 100
             assert result.posterior.sizes["chain"] == 2
             assert len(result._groups_warmup) == 0

+            # check warning for version 3.10 onwards
+            monkeypatch.setattr("pymc3.__version__", "3.10")
+            with pytest.warns(FutureWarning, match="pass return_inferencedata"):
+                result = pm.sample(**kwargs)
+            pass
Review comment on the trailing pass: unnecessary

Reply: I like to put them there to mark the end of a test/function (just like an unnecessary … In my opinion they have a few advantages: … Not saying you must put it back in - just showing a maybe new perspective :)

Reply: To me that looks weird and confusing.

     @pytest.mark.parametrize("cores", [1, 2])
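The reworked assertions show how idata_kwargs is forwarded to the InferenceData conversion, so extra groups such as prior-predictive samples end up on the returned arviz.InferenceData. A hedged sketch of the same pattern outside the test suite, with a made-up toy model (the variable names and data are illustrative, not from the PR):

import arviz as az
import pymc3 as pm

with pm.Model():
    mu = pm.Normal("mu", 0.0, 1.0)
    pm.Normal("obs", mu, 1.0, observed=[0.1, -0.3, 0.2])

    # prior predictive samples are drawn up front...
    prior = pm.sample_prior_predictive()

    # ...and attached to the returned InferenceData via idata_kwargs
    idata = pm.sample(
        draws=100,
        tune=50,
        chains=2,
        return_inferencedata=True,
        idata_kwargs={"prior": prior},
    )

assert isinstance(idata, az.InferenceData)
assert "prior" in idata  # the extra group is present on the result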