Commit 92b257a

Replace np.cast with np.asarray
1 parent 76a6c2e
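
The pattern applied throughout: np.cast[dtype](value) is a legacy casting table that NumPy 2.0 removed (NEP 52); np.asarray(value, dtype=dtype) produces the same cast. A minimal sketch of the replacement, with floatX standing in for pytensor.config.floatX:

    import numpy as np

    floatX = "float32"  # stand-in for pytensor.config.floatX

    # before (NumPy < 2.0): val = np.cast[floatX](0.5)
    val = np.asarray(0.5, dtype=floatX)
    assert val.dtype == np.dtype(floatX)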

5 files changed: +28 −28 lines


pytensor/scalar/basic.py

Lines changed: 7 additions & 7 deletions
@@ -3144,7 +3144,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz * exp2(x) * log(np.cast[x.type](2)),)
+        return (gz * exp2(x) * log(np.asarray(2, dtype=x.type)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3391,7 +3391,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (-gz / sqrt(np.cast[x.type](1) - sqr(x)),)
+        return (-gz / sqrt(np.asarray(1, dtype=x.type) - sqr(x)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3465,7 +3465,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / sqrt(np.cast[x.type](1) - sqr(x)),)
+        return (gz / sqrt(np.asarray(1, dtype=x.type) - sqr(x)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3537,7 +3537,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / (np.cast[x.type](1) + sqr(x)),)
+        return (gz / (np.asarray(1, dtype=x.type) + sqr(x)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3660,7 +3660,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / sqrt(sqr(x) - np.cast[x.type](1)),)
+        return (gz / sqrt(sqr(x) - np.asarray(1, dtype=x.type)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3737,7 +3737,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / sqrt(sqr(x) + np.cast[x.type](1)),)
+        return (gz / sqrt(sqr(x) + np.asarray(1, dtype=x.type)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3815,7 +3815,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / (np.cast[x.type](1) - sqr(x)),)
+        return (gz / (np.asarray(1, dtype=x.type) - sqr(x)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
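
For orientation, the seven hunks above sit in the L_op (gradient) methods of exp2, arccos, arcsin, arctan, arccosh, arcsinh and arctanh — readable off the expressions themselves — and each wraps the literal constant so it keeps the input's dtype. The underlying derivatives are the standard identities:

    \frac{d}{dx}\,2^{x} = 2^{x}\ln 2, \qquad
    \frac{d}{dx}\arccos x = \frac{-1}{\sqrt{1 - x^{2}}}, \qquad
    \frac{d}{dx}\arcsin x = \frac{1}{\sqrt{1 - x^{2}}}, \qquad
    \frac{d}{dx}\arctan x = \frac{1}{1 + x^{2}},

    \frac{d}{dx}\operatorname{arccosh} x = \frac{1}{\sqrt{x^{2} - 1}}, \qquad
    \frac{d}{dx}\operatorname{arcsinh} x = \frac{1}{\sqrt{x^{2} + 1}}, \qquad
    \frac{d}{dx}\operatorname{arctanh} x = \frac{1}{1 - x^{2}}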

tests/scan/test_rewriting.py

Lines changed: 1 addition & 1 deletion
@@ -673,7 +673,7 @@ def test_machine_translation(self):
         zi = tensor3("zi")
         zi_value = x_value
 
-        init = pt.alloc(np.cast[config.floatX](0), batch_size, dim)
+        init = pt.alloc(np.asarray(0, dtype=config.floatX), batch_size, dim)
 
         def rnn_step1(
             # sequences

tests/tensor/test_extra_ops.py

Lines changed: 3 additions & 3 deletions
@@ -685,7 +685,7 @@ def test_perform(self, shp):
         y = scalar()
         f = function([x, y], fill_diagonal(x, y))
         a = rng.random(shp).astype(config.floatX)
-        val = np.cast[config.floatX](rng.random())
+        val = rng.random(dtype=config.floatX)
         out = f(a, val)
         # We can't use np.fill_diagonal as it is bugged.
         assert np.allclose(np.diag(out), val)
@@ -697,7 +697,7 @@ def test_perform_3d(self):
         x = tensor3()
         y = scalar()
         f = function([x, y], fill_diagonal(x, y))
-        val = np.cast[config.floatX](rng.random() + 10)
+        val = rng.random(dtype=config.floatX) + 10
         out = f(a, val)
         # We can't use np.fill_diagonal as it is bugged.
         assert out[0, 0, 0] == val
@@ -759,7 +759,7 @@ def test_perform(self, test_offset, shp):
 
         f = function([x, y, z], fill_diagonal_offset(x, y, z))
         a = rng.random(shp).astype(config.floatX)
-        val = np.cast[config.floatX](rng.random())
+        val = rng.random(dtype=config.floatX)
         out = f(a, val, test_offset)
         # We can't use np.fill_diagonal as it is bugged.
         assert np.allclose(np.diag(out, test_offset), val)
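
The three hunks above can drop the wrapper entirely because Generator.random accepts a dtype keyword (restricted to float32 and float64, which covers the usual values of config.floatX). A quick sketch:

    import numpy as np

    rng = np.random.default_rng(42)
    vals = rng.random(3, dtype="float32")  # dtype keyword: float32/float64 only
    assert vals.dtype == np.float32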

tests/test_gradient.py

Lines changed: 13 additions & 13 deletions
@@ -480,12 +480,12 @@ def make_grad_func(X):
         int_type = imatrix().dtype
         float_type = "float64"
 
-        X = np.cast[int_type](rng.standard_normal((m, d)) * 127.0)
-        W = np.cast[W.dtype](rng.standard_normal((d, n)))
-        b = np.cast[b.dtype](rng.standard_normal(n))
+        X = np.asarray(rng.standard_normal((m, d)) * 127.0, dtype=int_type)
+        W = rng.standard_normal((d, n), dtype=W.dtype)
+        b = rng.standard_normal(n, dtype=b.dtype)
 
         int_result = int_func(X, W, b)
-        float_result = float_func(np.cast[float_type](X), W, b)
+        float_result = float_func(np.asarray(X, dtype=float_type), W, b)
 
         assert np.allclose(int_result, float_result), (int_result, float_result)
 
@@ -507,7 +507,7 @@ def test_grad_disconnected(self):
         # the output
         f = pytensor.function([x], g)
         rng = np.random.default_rng([2012, 9, 5])
-        x = np.cast[x.dtype](rng.standard_normal(3))
+        x = rng.standard_normal(3, dtype=x.dtype)
         g = f(x)
         assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))
 
@@ -629,7 +629,7 @@ def test_known_grads():
 
     rng = np.random.default_rng([2012, 11, 15])
    values = [rng.standard_normal(10), rng.integers(10), rng.standard_normal()]
-    values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
+    values = [np.asarray(value, dtype=ipt.dtype) for ipt, value in zip(inputs, values)]
 
     true_grads = grad(cost, inputs, disconnected_inputs="ignore")
     true_grads = pytensor.function(inputs, true_grads)
@@ -676,7 +676,7 @@ def test_known_grads_integers():
     f = pytensor.function([g_expected], g_grad)
 
     x = -3
-    gv = np.cast[config.floatX](0.6)
+    gv = np.asarray(0.6, dtype=config.floatX)
 
     g_actual = f(gv)
 
@@ -742,7 +742,7 @@ def test_subgraph_grad():
     inputs = [t, x]
     rng = np.random.default_rng([2012, 11, 15])
     values = [rng.standard_normal(2), rng.standard_normal(3)]
-    values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
+    values = [np.asarray(value, dtype=ipt.dtype) for ipt, value in zip(inputs, values)]
 
     wrt = [w2, w1]
     cost = cost2 + cost1
@@ -1026,30 +1026,30 @@ def test_jacobian_scalar():
     # test when the jacobian is called with a tensor as wrt
     Jx = jacobian(y, x)
     f = pytensor.function([x], Jx)
-    vx = np.cast[pytensor.config.floatX](rng.uniform())
+    vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
     assert np.allclose(f(vx), 2)
 
     # test when the jacobian is called with a tuple as wrt
     Jx = jacobian(y, (x,))
     assert isinstance(Jx, tuple)
     f = pytensor.function([x], Jx[0])
-    vx = np.cast[pytensor.config.floatX](rng.uniform())
+    vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
     assert np.allclose(f(vx), 2)
 
     # test when the jacobian is called with a list as wrt
     Jx = jacobian(y, [x])
     assert isinstance(Jx, list)
     f = pytensor.function([x], Jx[0])
-    vx = np.cast[pytensor.config.floatX](rng.uniform())
+    vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
     assert np.allclose(f(vx), 2)
 
     # test when the jacobian is called with a list of two elements
     z = scalar()
     y = x * z
     Jx = jacobian(y, [x, z])
     f = pytensor.function([x, z], Jx)
-    vx = np.cast[pytensor.config.floatX](rng.uniform())
-    vz = np.cast[pytensor.config.floatX](rng.uniform())
+    vx = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
+    vz = np.asarray(rng.uniform(), dtype=pytensor.config.floatX)
     vJx = f(vx, vz)
 
     assert np.allclose(vJx[0], vz)
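
Not every call site in this file can use a dtype keyword: Generator.standard_normal only draws float32/float64 directly, so an integer target such as int_type still goes through np.asarray, and Generator.uniform takes no dtype argument at all — hence the mix of styles above. A sketch of the three cases, under those API constraints:

    import numpy as np

    rng = np.random.default_rng(0)

    w = rng.standard_normal((2, 3), dtype="float32")  # float dtypes: drawn directly
    X = np.asarray(rng.standard_normal((2, 3)) * 127.0, dtype="int32")  # int: cast after the draw
    vx = np.asarray(rng.uniform(), dtype="float32")  # uniform has no dtype keyword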

tests/typed_list/test_basic.py

Lines changed: 4 additions & 4 deletions
@@ -577,10 +577,10 @@ def test_correct_answer(self):
         x = tensor3()
         y = tensor3()
 
-        A = np.cast[pytensor.config.floatX](np.random.random((5, 3)))
-        B = np.cast[pytensor.config.floatX](np.random.random((7, 2)))
-        X = np.cast[pytensor.config.floatX](np.random.random((5, 6, 1)))
-        Y = np.cast[pytensor.config.floatX](np.random.random((1, 9, 3)))
+        A = np.random.random((5, 3)).astype(pytensor.config.floatX)
+        B = np.random.random((7, 2)).astype(pytensor.config.floatX)
+        X = np.random.random((5, 6, 1)).astype(pytensor.config.floatX)
+        Y = np.random.random((1, 9, 3)).astype(pytensor.config.floatX)
 
         make_list((3.0, 4.0))
         c = make_list((a, b))
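
Worth noting for this last file: the legacy np.random.random takes only a size argument (no dtype keyword), so the cast happens after the draw, e.g.:

    import numpy as np

    A = np.random.random((5, 3)).astype("float32")  # legacy API: cast via .astype
    assert A.dtype == np.float32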
