@@ -230,7 +230,11 @@ def build_model(distfam, valuedomain, vardomains, extra_args=None):
             v_at.name = v
             param_vars[v] = v_at
         param_vars.update(extra_args)
-        distfam("value", **param_vars, transform=None)
+        distfam(
+            "value",
+            **param_vars,
+            transform=None,
+        )
     return m, param_vars
 
 
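For context, `build_model` is the shared helper behind these tests: it instantiates `distfam` inside a fresh `Model`, holds each parameter in a named shared variable, and merges `extra_args` into the keyword arguments of the distribution call (the multi-line rewrite above is purely cosmetic). A usage sketch, reusing domain helpers that already appear elsewhere in this patch:

    # Sketch only; MvNormal, RealMatrix, Vector, R, and PdMatrix are the test
    # module's existing imports/helpers. extra_args is forwarded verbatim into
    # the distfam(...) call shown above.
    m, param_vars = build_model(
        MvNormal,
        RealMatrix(5, 2),                          # domain of the value variable
        {"mu": Vector(R, 2), "tau": PdMatrix(2)},  # domains of the parameters
        extra_args={"size": 5},                    # extra keyword for MvNormal
    )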
@@ -1473,6 +1477,9 @@ def test_beta_binomial(self):
             {"alpha": Rplus, "beta": Rplus, "n": NatSmall},
             lambda value, alpha, beta, n: sp.betabinom.logcdf(value, a=alpha, b=beta, n=n),
         )
+
+    @pytest.mark.xfail(reason="Distribution not refactored yet")
+    def test_beta_binomial_selfconsistency(self):
         self.check_selfconsistency_discrete_logcdf(
             BetaBinomial,
             Nat,
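Splitting the self-consistency assertion into its own `xfail`-marked method keeps the scipy `logcdf` comparison above running while only the not-yet-refactored code path is expected to fail. Roughly, `check_selfconsistency_discrete_logcdf` asserts that a discrete distribution's logcdf matches the log-sum of its pointwise probabilities; a self-contained sketch with hypothetical callables:

    import numpy as np
    from scipy.special import logsumexp

    def selfconsistency_discrete_logcdf(logp_fn, logcdf_fn, value):
        # For a distribution on the non-negative integers, logcdf(value)
        # should equal logsumexp(logp(0), logp(1), ..., logp(value)).
        logps = np.array([logp_fn(v) for v in range(value + 1)])
        np.testing.assert_allclose(logcdf_fn(value), logsumexp(logps))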
@@ -1611,14 +1618,14 @@ def test_zeroinflatedbinomial(self):
             n_samples=10,
         )
 
-    @pytest.mark.xfail(reason="Distribution not refactored yet")
     @pytest.mark.parametrize("n", [1, 2, 3])
     def test_mvnormal(self, n):
         self.check_logp(
             MvNormal,
             RealMatrix(5, n),
             {"mu": Vector(R, n), "tau": PdMatrix(n)},
             normal_logpdf_tau,
+            extra_args={"size": 5},
         )
         self.check_logp(
             MvNormal,
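The new `extra_args={"size": 5}` (here and in the two hunks below) mirrors the `RealMatrix(5, n)` value domain: five stacked n-dimensional draws. Under the refactored `RandomVariable`-based `MvNormal`, that batch shape presumably has to be declared at creation time rather than inferred from the test value. A hedged sketch of what the keyword expresses:

    import numpy as np
    from pymc3 import Model, MvNormal  # import path assumed from this branch

    n = 3  # mirrors the parametrized dimensionality
    with Model():
        # size=5 declares five i.i.d. rows, so the value has shape (5, n),
        # matching the RealMatrix(5, n) domain used by check_logp.
        x = MvNormal("x", mu=np.zeros(n), tau=np.eye(n), size=5)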
@@ -1631,6 +1638,7 @@ def test_mvnormal(self, n):
             RealMatrix(5, n),
             {"mu": Vector(R, n), "cov": PdMatrix(n)},
             normal_logpdf_cov,
+            extra_args={"size": 5},
         )
         self.check_logp(
             MvNormal,
@@ -1644,6 +1652,7 @@ def test_mvnormal(self, n):
             {"mu": Vector(R, n), "chol": PdMatrixChol(n)},
             normal_logpdf_chol,
             decimal=select_by_precision(float64=6, float32=-1),
+            extra_args={"size": 5},
         )
         self.check_logp(
             MvNormal,
@@ -1652,23 +1661,19 @@ def test_mvnormal(self, n):
             normal_logpdf_chol,
             decimal=select_by_precision(float64=6, float32=0),
         )
-
-        def MvNormalUpper(*args, **kwargs):
-            return MvNormal(lower=False, *args, **kwargs)
-
         self.check_logp(
-            MvNormalUpper,
+            MvNormal,
             Vector(R, n),
             {"mu": Vector(R, n), "chol": PdMatrixCholUpper(n)},
             normal_logpdf_chol_upper,
             decimal=select_by_precision(float64=6, float32=0),
+            extra_args={"lower": False},
         )
 
     @pytest.mark.xfail(
         condition=(aesara.config.floatX == "float32"),
         reason="Fails on float32 due to inf issues",
     )
-    @pytest.mark.xfail(reason="Distribution not refactored yet")
     def test_mvnormal_indef(self):
         cov_val = np.array([[1, 0.5], [0.5, -2]])
         cov = aet.matrix("cov")
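The deleted `MvNormalUpper` closure was pure partial application; routing `lower=False` through `extra_args` drops the throwaway wrapper. Both spellings build the same distribution, with `lower=False` telling `MvNormal` to read `chol` as an upper-triangular Cholesky factor. A self-contained sketch (import path assumed from this branch):

    import numpy as np
    from pymc3 import Model, MvNormal

    with Model():
        U = np.linalg.cholesky(np.eye(2)).T  # an upper-triangular factor
        # Equivalent to what the removed MvNormalUpper wrapper produced:
        x = MvNormal("x", mu=np.zeros(2), chol=U, lower=False)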
@@ -1683,14 +1688,13 @@ def test_mvnormal_indef(self):
         f_dlogp = aesara.function([cov, x], dlogp)
         assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
 
-        logp = logp(MvNormal.dist(mu=mu, tau=cov), x)
+        logp = logpt(MvNormal.dist(mu=mu, tau=cov), x)
         f_logp = aesara.function([cov, x], logp)
         assert f_logp(cov_val, np.ones(2)) == -np.inf
         dlogp = aet.grad(logp, cov)
         f_dlogp = aesara.function([cov, x], dlogp)
         assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))
 
-    @pytest.mark.xfail(reason="Distribution not refactored yet")
     def test_mvnormal_init_fail(self):
         with Model():
             with pytest.raises(ValueError):
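Beyond matching the refactored API (`logpt` builds the log-probability graph at a value), the rename fixes a latent shadowing bug: the old line rebound the name `logp` to its own return value, so any later call to the function in that scope would blow up. A minimal, self-contained reproduction of the pitfall:

    def logp(dist, value):    # stand-in for the imported callable
        return value

    logp = logp(None, 1.0)    # rebinds `logp` to a float...
    try:
        logp(None, 2.0)       # ...so a second call raises TypeError
    except TypeError as exc:
        print("shadowed:", exc)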