import aesara.tensor as at
import numpy as np
+ from typing import Union

from aesara.assert_op import Assert
+ from aesara.tensor.var import TensorVariable
+ from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.basic import (
    BetaRV,
    cauchy,
    exponential,
    gamma,
+     gumbel,
    halfcauchy,
    halfnormal,
    invgamma,
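The newly imported `gumbel` op from `aesara.tensor.random.basic` is what the refactored `Gumbel` class later in this diff assigns to `rv_op`, so sampling is delegated to Aesara. A minimal sketch of calling that op directly, assuming Aesara's usual `RandomVariable` calling convention of `(loc, scale)` plus a `size` keyword (illustrative only, not part of this diff):

```python
# Illustrative only: build a sampling graph with the imported `gumbel` op
# and draw values by evaluating it. Argument order (loc, scale) is assumed.
from aesara.tensor.random.basic import gumbel

draws = gumbel(0.0, 1.0, size=(5,))  # Gumbel(mu=0, beta=1), five draws
print(draws.eval())
```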
@@ -258,11 +262,7 @@ def logcdf(value, lower, upper):
        return at.switch(
            at.lt(value, lower) | at.lt(upper, lower),
            -np.inf,
-            at.switch(
-                at.lt(value, upper),
-                at.log(value - lower) - at.log(upper - lower),
-                0,
-            ),
+            at.switch(at.lt(value, upper), at.log(value - lower) - at.log(upper - lower), 0,),
        )

@@ -496,10 +496,7 @@ def logcdf(value, mu, sigma):
        -------
        TensorVariable
        """
-        return bound(
-            normal_lcdf(mu, sigma, value),
-            0 < sigma,
-        )
+        return bound(normal_lcdf(mu, sigma, value), 0 < sigma,)


class TruncatedNormal(BoundedContinuous):
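Every one of these collapsed `return bound(...)` calls uses the same helper: `bound` keeps the log-probability expression where all of the trailing conditions hold and returns `-inf` elsewhere. A rough, self-contained sketch of that behaviour in plain Aesara (the `bound_sketch` name is illustrative; this is not the library's actual implementation):

```python
import aesara.tensor as at
import numpy as np

def bound_sketch(logp, *conditions):
    # Combine the support/parameter checks elementwise and mask the log-prob.
    ok = conditions[0]
    for cond in conditions[1:]:
        ok = ok & cond
    return at.switch(ok, logp, -np.inf)

value = at.dscalar("value")
sigma = at.dscalar("sigma")
expr = bound_sketch(at.log(value), 0 < sigma, 0 <= value)
print(expr.eval({value: 2.0, sigma: -1.0}))  # -inf, since 0 < sigma fails
```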
@@ -833,11 +830,7 @@ def logcdf(value, loc, sigma):
        TensorVariable
        """
        z = zvalue(value, mu=loc, sigma=sigma)
-        return bound(
-            at.log1p(-at.erfc(z / at.sqrt(2.0))),
-            loc <= value,
-            0 < sigma,
-        )
+        return bound(at.log1p(-at.erfc(z / at.sqrt(2.0))), loc <= value, 0 < sigma,)

    def _distr_parameters_for_repr(self):
        return ["sigma"]
@@ -1053,11 +1046,7 @@ def logcdf(self, value):
        b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r)

        return bound(
-            at.switch(
-                at.lt(value, np.inf),
-                a + log1pexp(b - a),
-                0,
-            ),
+            at.switch(at.lt(value, np.inf), a + log1pexp(b - a), 0,),
            0 < value,
            0 < mu,
            0 < lam,
@@ -1219,11 +1208,7 @@ def logcdf(value, alpha, beta):
        )

        return bound(
-            at.switch(
-                at.lt(value, 1),
-                at.log(incomplete_beta(alpha, beta, value)),
-                0,
-            ),
+            at.switch(at.lt(value, 1), at.log(incomplete_beta(alpha, beta, value)), 0,),
            0 <= value,
            0 < alpha,
            0 < beta,
@@ -1418,11 +1403,7 @@ def logcdf(value, lam):
        TensorVariable
        """
        a = lam * value
-        return bound(
-            log1mexp(a),
-            0 <= value,
-            0 <= lam,
-        )
+        return bound(log1mexp(a), 0 <= value, 0 <= lam,)


class Laplace(Continuous):
@@ -1538,11 +1519,7 @@ def logcdf(self, value):
            at.switch(
                at.le(value, a),
                at.log(0.5) + y,
-                at.switch(
-                    at.gt(y, 1),
-                    at.log1p(-0.5 * at.exp(-y)),
-                    at.log(1 - 0.5 * at.exp(-y)),
-                ),
+                at.switch(at.gt(y, 1), at.log1p(-0.5 * at.exp(-y)), at.log(1 - 0.5 * at.exp(-y)),),
            ),
            0 < b,
        )
@@ -1799,11 +1776,7 @@ def logcdf(self, value):
        sigma = self.sigma
        tau = self.tau

-        return bound(
-            normal_lcdf(mu, sigma, at.log(value)),
-            0 < value,
-            0 < tau,
-        )
+        return bound(normal_lcdf(mu, sigma, at.log(value)), 0 < value, 0 < tau,)


class StudentT(Continuous):
@@ -1967,12 +1940,7 @@ def logcdf(self, value):
        sqrt_t2_nu = at.sqrt(t ** 2 + nu)
        x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu)

-        return bound(
-            at.log(incomplete_beta(nu / 2.0, nu / 2.0, x)),
-            0 < nu,
-            0 < sigma,
-            0 < lam,
-        )
+        return bound(at.log(incomplete_beta(nu / 2.0, nu / 2.0, x)), 0 < nu, 0 < sigma, 0 < lam,)


class Pareto(Continuous):
@@ -2107,11 +2075,7 @@ def logcdf(self, value):
        alpha = self.alpha
        arg = (m / value) ** alpha
        return bound(
-            at.switch(
-                at.le(arg, 1e-5),
-                at.log1p(-arg),
-                at.log(1 - arg),
-            ),
+            at.switch(at.le(arg, 1e-5), at.log1p(-arg), at.log(1 - arg),),
            m <= value,
            0 < alpha,
            0 < m,
@@ -2209,10 +2173,7 @@ def logcdf(value, alpha, beta):
        -------
        TensorVariable
        """
-        return bound(
-            at.log(0.5 + at.arctan((value - alpha) / beta) / np.pi),
-            0 < beta,
-        )
+        return bound(at.log(0.5 + at.arctan((value - alpha) / beta) / np.pi), 0 < beta,)


class HalfCauchy(PositiveContinuous):
@@ -2296,11 +2257,7 @@ def logcdf(value, loc, beta):
        -------
        TensorVariable
        """
-        return bound(
-            at.log(2 * at.arctan((value - loc) / beta) / np.pi),
-            loc <= value,
-            0 < beta,
-        )
+        return bound(at.log(2 * at.arctan((value - loc) / beta) / np.pi), loc <= value, 0 < beta,)


class Gamma(PositiveContinuous):
@@ -2768,12 +2725,7 @@ def logcdf(self, value):
        alpha = self.alpha
        beta = self.beta
        a = (value / beta) ** alpha
-        return bound(
-            log1mexp(a),
-            0 <= value,
-            0 < alpha,
-            0 < beta,
-        )
+        return bound(log1mexp(a), 0 <= value, 0 < alpha, 0 < beta,)


class HalfStudentT(PositiveContinuous):
@@ -3532,43 +3484,29 @@ class Gumbel(Continuous):
    beta: float
        Scale parameter (beta > 0).
    """
+    rv_op = gumbel

-    def __init__(self, mu=0, beta=1.0, **kwargs):
-        self.mu = at.as_tensor_variable(floatX(mu))
-        self.beta = at.as_tensor_variable(floatX(beta))
-
-        assert_negative_support(beta, "beta", "Gumbel")
-
-        self.mean = self.mu + self.beta * np.euler_gamma
-        self.median = self.mu - self.beta * at.log(at.log(2))
-        self.mode = self.mu
-        self.variance = (np.pi ** 2 / 6.0) * self.beta ** 2
+    @classmethod
+    def dist(
+        cls, mu: float = None, beta: float = None, no_assert: bool = False, **kwargs
+    ) -> RandomVariable:

-        super().__init__(**kwargs)
+        mu = at.as_tensor_variable(floatX(mu))
+        beta = at.as_tensor_variable(floatX(beta))

-    def random(self, point=None, size=None):
-        """
-        Draw random values from Gumbel distribution.
+        if not no_assert:
+            assert_negative_support(beta, "beta", "Gumbel")

-        Parameters
-        ----------
-        point: dict, optional
-            Dict of variable values on which random values are to be
-            conditioned (uses default point if not specified).
-        size: int, optional
-            Desired size of random sample (returns one sample if not
-            specified).
+        return super().dist([mu, beta], **kwargs)

-        Returns
-        -------
-        array
-        """
-        # mu, sigma = draw_values([self.mu, self.beta], point=point, size=size)
-        # return generate_samples(
-        #     stats.gumbel_r.rvs, loc=mu, scale=sigma, dist_shape=self.shape, size=size
-        # )
+    def _distr_parameters_for_repr(self):
+        return ["mu", "beta"]

-    def logp(self, value):
+    def logp(
+        value: Union[float, np.ndarray, TensorVariable],
+        mu: Union[float, np.ndarray, TensorVariable],
+        beta: Union[float, np.ndarray, TensorVariable],
+    ) -> TensorVariable:
        """
        Calculate log-probability of Gumbel distribution at specified value.

@@ -3582,15 +3520,14 @@ def logp(self, value):
        -------
        TensorVariable
        """
-        mu = self.mu
-        beta = self.beta
        scaled = (value - mu) / beta
-        return bound(
-            -scaled - at.exp(-scaled) - at.log(self.beta),
-            0 < beta,
-        )
+        return bound(-scaled - at.exp(-scaled) - at.log(beta), 0 < beta,)

-    def logcdf(self, value):
+    def logcdf(
+        value: Union[float, np.ndarray, TensorVariable],
+        mu: Union[float, np.ndarray, TensorVariable],
+        beta: Union[float, np.ndarray, TensorVariable],
+    ) -> TensorVariable:
        """
        Compute the log of the cumulative distribution function for Gumbel distribution
        at the specified value.
@@ -3605,13 +3542,7 @@ def logcdf(self, value):
        -------
        TensorVariable
        """
-        beta = self.beta
-        mu = self.mu
-
-        return bound(
-            -at.exp(-(value - mu) / beta),
-            0 < beta,
-        )
+        return bound(-at.exp(-(value - mu) / beta), 0 < beta,)


class Rice(PositiveContinuous):
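To make the shape of the Gumbel refactor concrete: the new `logp`/`logcdf` receive `value`, `mu`, and `beta` directly instead of reading `self.mu`/`self.beta`, but the expressions they build are unchanged. A self-contained sketch of those expressions in plain Aesara (illustrative variable names; the real methods additionally wrap the result in `bound(..., 0 < beta)`):

```python
import aesara
import aesara.tensor as at

value, mu, beta = at.dscalars("value", "mu", "beta")
scaled = (value - mu) / beta

logp = -scaled - at.exp(-scaled) - at.log(beta)  # Gumbel log-density
logcdf = -at.exp(-scaled)                        # Gumbel log-CDF

f = aesara.function([value, mu, beta], [logp, logcdf])
print(f(0.5, 0.0, 1.0))  # approximately [-1.107, -0.607]
```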
@@ -3870,8 +3801,7 @@ def logp(self, value):
        s = self.s

        return bound(
-            -(value - mu) / s - at.log(s) - 2 * at.log1p(at.exp(-(value - mu) / s)),
-            s > 0,
+            -(value - mu) / s - at.log(s) - 2 * at.log1p(at.exp(-(value - mu) / s)), s > 0,
        )

    def logcdf(self, value):
@@ -3891,10 +3821,7 @@ def logcdf(self, value):
        """
        mu = self.mu
        s = self.s
-        return bound(
-            -log1pexp(-(value - mu) / s),
-            0 < s,
-        )
+        return bound(-log1pexp(-(value - mu) / s), 0 < s,)


class LogitNormal(UnitContinuous):
@@ -4253,7 +4180,5 @@ def logcdf(self, value):
        sigma = self.sigma

        scaled = (value - mu) / sigma
-        return bound(
-            at.log(at.erfc(at.exp(-scaled / 2) * (2 ** -0.5))),
-            0 < sigma,
-        )
+        return bound(at.log(at.erfc(at.exp(-scaled / 2) * (2 ** -0.5))), 0 < sigma,)
+