 import theano.tensor as tt
 from theano import scan
 
+from pymc3.util import get_variable_name
+from .continuous import get_tau_sd, Normal, Flat
 from . import multivariate
-from . import continuous
 from . import distribution
 
 __all__ = [
@@ -29,20 +30,20 @@ class AR1(distribution.Continuous):
 
     def __init__(self, k, tau_e, *args, **kwargs):
         super(AR1, self).__init__(*args, **kwargs)
-        self.k = k
-        self.tau_e = tau_e
+        self.k = k = tt.as_tensor_variable(k)
+        self.tau_e = tau_e = tt.as_tensor_variable(tau_e)
         self.tau = tau_e * (1 - k ** 2)
-        self.mode = 0.
+        self.mode = tt.as_tensor_variable(0.)
 
     def logp(self, x):
         k = self.k
         tau_e = self.tau_e
 
         x_im1 = x[:-1]
         x_i = x[1:]
-        boundary = continuous.Normal.dist(0, tau_e).logp
+        boundary = Normal.dist(0., tau=tau_e).logp
 
-        innov_like = continuous.Normal.dist(k * x_im1, tau_e).logp(x_i)
+        innov_like = Normal.dist(k * x_im1, tau=tau_e).logp(x_i)
         return boundary(x[0]) + tt.sum(innov_like) + boundary(x[-1])
 
     def _repr_latex_(self, name=None, dist=None):
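Illustrative usage sketch (not part of the diff): with k and tau_e now routed through tt.as_tensor_variable and Normal referenced directly, AR1 accepts plain Python scalars or model variables alike. The variable names and shape below are hypothetical.

import pymc3 as pm

with pm.Model():
    k = pm.Uniform('k', -1., 1.)                    # autoregressive coefficient as a model variable
    ar1 = pm.AR1('ar1', k=k, tau_e=1.0, shape=100)  # tau_e given as a plain float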
@@ -71,14 +72,15 @@ class GaussianRandomWalk(distribution.Continuous):
         distribution for initial value (Defaults to Flat())
     """
 
-    def __init__(self, tau=None, init=continuous.Flat.dist(), sd=None, mu=0.,
+    def __init__(self, tau=None, init=Flat.dist(), sd=None, mu=0.,
                  *args, **kwargs):
         super(GaussianRandomWalk, self).__init__(*args, **kwargs)
-        self.tau = tau
-        self.sd = sd
-        self.mu = mu
+        tau, sd = get_tau_sd(tau=tau, sd=sd)
+        self.tau = tau = tt.as_tensor_variable(tau)
+        self.sd = sd = tt.as_tensor_variable(sd)
+        self.mu = mu = tt.as_tensor_variable(mu)
         self.init = init
-        self.mean = 0.
+        self.mean = tt.as_tensor_variable(0.)
 
     def logp(self, x):
         tau = self.tau
@@ -89,7 +91,7 @@ def logp(self, x):
         x_im1 = x[:-1]
         x_i = x[1:]
 
-        innov_like = continuous.Normal.dist(mu=x_im1 + mu, tau=tau, sd=sd).logp(x_i)
+        innov_like = Normal.dist(mu=x_im1 + mu, sd=sd).logp(x_i)
         return init.logp(x[0]) + tt.sum(innov_like)
 
     def _repr_latex_(self, name=None, dist=None):
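Illustrative usage sketch (not part of the diff): because the constructor now resolves tau and sd through get_tau_sd, the walk can be parameterized by either precision or standard deviation, mirroring Normal. The prior and shape below are hypothetical.

import pymc3 as pm

with pm.Model():
    sigma = pm.HalfNormal('sigma', sd=1.)
    walk = pm.GaussianRandomWalk('walk', mu=0., sd=sigma, shape=100)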
@@ -124,15 +126,15 @@ class GARCH11(distribution.Continuous):
         initial_vol >= 0, distribution for initial volatility, sigma_0
     """
 
-    def __init__(self, omega=None, alpha_1=None, beta_1=None,
-                 initial_vol=None, *args, **kwargs):
+    def __init__(self, omega, alpha_1, beta_1,
+                 initial_vol, *args, **kwargs):
         super(GARCH11, self).__init__(*args, **kwargs)
 
-        self.omega = omega
-        self.alpha_1 = alpha_1
-        self.beta_1 = beta_1
+        self.omega = omega = tt.as_tensor_variable(omega)
+        self.alpha_1 = alpha_1 = tt.as_tensor_variable(alpha_1)
+        self.beta_1 = beta_1 = tt.as_tensor_variable(beta_1)
         self.initial_vol = initial_vol
-        self.mean = 0
+        self.mean = tt.as_tensor_variable(0.)
 
     def get_volatility(self, x):
         x = x[:-1]
@@ -149,7 +151,7 @@ def volatility_update(x, vol, w, a, b):
 
     def logp(self, x):
         vol = self.get_volatility(x)
-        return tt.sum(continuous.Normal.dist(0, sd=vol).logp(x))
+        return tt.sum(Normal.dist(0., sd=vol).logp(x))
 
     def _repr_latex_(self, name=None, dist=None):
         if dist is None:
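Illustrative usage sketch (not part of the diff): omega, alpha_1, beta_1 and initial_vol are now required arguments rather than keywords defaulting to None. The priors and placeholder data below are hypothetical.

import numpy as np
import pymc3 as pm

returns = 0.01 * np.random.randn(200)   # placeholder return series
with pm.Model():
    omega = pm.HalfNormal('omega', sd=1.)
    alpha_1 = pm.Uniform('alpha_1', 0., 1.)
    beta_1 = pm.Uniform('beta_1', 0., 1.)
    vol0 = pm.HalfNormal('vol0', sd=1.)
    pm.GARCH11('r', omega=omega, alpha_1=alpha_1, beta_1=beta_1,
               initial_vol=vol0, observed=returns)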
@@ -178,7 +180,7 @@ class EulerMaruyama(distribution.Continuous):
     """
     def __init__(self, dt, sde_fn, sde_pars, *args, **kwds):
         super(EulerMaruyama, self).__init__(*args, **kwds)
-        self.dt = dt
+        self.dt = dt = tt.as_tensor_variable(dt)
         self.sde_fn = sde_fn
         self.sde_pars = sde_pars
 
@@ -187,7 +189,7 @@ def logp(self, x):
         f, g = self.sde_fn(x[:-1], *self.sde_pars)
         mu = xt + self.dt * f
         sd = tt.sqrt(self.dt) * g
-        return tt.sum(continuous.Normal.dist(mu=mu, sd=sd).logp(x[1:]))
+        return tt.sum(Normal.dist(mu=mu, sd=sd).logp(x[1:]))
 
     def _repr_latex_(self, name=None, dist=None):
         if dist is None:
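Illustrative usage sketch (not part of the diff): sde_fn returns the drift and diffusion of the SDE evaluated at the previous state. The Ornstein-Uhlenbeck-style function, parameter and shape below are hypothetical.

import pymc3 as pm

def ou_sde(x, lam):
    # drift, diffusion of dx = -lam * x dt + dW
    return -lam * x, 1.0

with pm.Model():
    lam = pm.HalfNormal('lam', sd=1.)
    path = pm.EulerMaruyama('path', dt=0.1, sde_fn=ou_sde,
                            sde_pars=(lam,), shape=100)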
@@ -210,7 +212,7 @@ class MvGaussianRandomWalk(distribution.Continuous):
     init : distribution
         distribution for initial value (Defaults to Flat())
     """
-    def __init__(self, mu=0., cov=None, init=continuous.Flat.dist(),
+    def __init__(self, mu=0., cov=None, init=Flat.dist(),
                  *args, **kwargs):
         super(MvGaussianRandomWalk, self).__init__(*args, **kwargs)
         if cov is None:
@@ -220,9 +222,9 @@ def __init__(self, mu=0., cov=None, init=continuous.Flat.dist(),
         if cov.ndim != 2:
             raise ValueError('cov must be two dimensional.')
         self.cov = cov
-        self.mu = mu
+        self.mu = mu = tt.as_tensor_variable(mu)
         self.init = init
-        self.mean = 0.
+        self.mean = tt.as_tensor_variable(0.)
 
     def logp(self, x):
         cov = self.cov
@@ -259,13 +261,13 @@ class MvStudentTRandomWalk(distribution.Continuous):
     init : distribution
         distribution for initial value (Defaults to Flat())
     """
-    def __init__(self, nu, mu=0., cov=None, init=continuous.Flat.dist(),
+    def __init__(self, nu, mu=0., cov=None, init=Flat.dist(),
                  *args, **kwargs):
         super(MvStudentTRandomWalk, self).__init__(*args, **kwargs)
-        self.mu = mu
-        self.nu = nu
+        self.mu = mu = tt.as_tensor_variable(mu)
+        self.nu = nu = tt.as_tensor_variable(nu)
         self.init = init
-        self.mean = 0.
+        self.mean = tt.as_tensor_variable(0.)
 
         if cov is None:
             raise ValueError('A covariance matrix must be provided as cov argument.')
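Illustrative usage sketch (not part of the diff): both multivariate walks still require an explicit cov, while mu (and nu) are now cast with tt.as_tensor_variable like the scalar classes. The dimensions below are hypothetical.

import numpy as np
import pymc3 as pm

with pm.Model():
    cov = np.eye(3)
    pm.MvGaussianRandomWalk('gwalk', mu=np.zeros(3), cov=cov, shape=(50, 3))
    pm.MvStudentTRandomWalk('twalk', nu=5, mu=np.zeros(3), cov=cov, shape=(50, 3))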