
Downstream #1288 #87


Closed · wants to merge 18 commits
Changes from all commits
152 changes: 152 additions & 0 deletions pytensor/scalar/math.py
@@ -1481,3 +1481,155 @@ def c_code(self, *args, **kwargs):


betainc_der = BetaIncDer(upgrade_to_float_no_complex, name="betainc_der")


class Hyp2F1(ScalarOp):
    """
    Gaussian hypergeometric function ``2F1(a, b; c; z)``.

    """

    nin = 4
    nfunc_spec = ("scipy.special.hyp2f1", 4, 1)

    @staticmethod
    def st_impl(a, b, c, z):
        if abs(z) >= 1:
            raise NotImplementedError("hyp2f1 only supported for |z| < 1.")
        return scipy.special.hyp2f1(a, b, c, z)

    def impl(self, a, b, c, z):
        return Hyp2F1.st_impl(a, b, c, z)

    def grad(self, inputs, grads):
        a, b, c, z = inputs
        (gz,) = grads
        return [
            gz * hyp2f1_der(a, b, c, z, 0),
            gz * hyp2f1_der(a, b, c, z, 1),
            gz * hyp2f1_der(a, b, c, z, 2),
            gz * hyp2f1_der(a, b, c, z, 3),
        ]

    def c_code(self, *args, **kwargs):
        raise NotImplementedError()


hyp2f1 = Hyp2F1(upgrade_to_float, name="hyp2f1")
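
A quick sanity check for the new Op (a hedged sketch, not part of the diff; it assumes SciPy is installed and stays inside the supported domain ``|z| < 1``):

```python
# Minimal sketch: the Python-mode implementation should agree with SciPy
# inside the supported domain |z| < 1.  Values are illustrative only.
import numpy as np
import scipy.special

a, b, c, z = 2.0, 1.5, 3.0, 0.25
np.testing.assert_allclose(
    Hyp2F1.st_impl(a, b, c, z),  # the Op's scalar implementation
    scipy.special.hyp2f1(a, b, c, z),
)
```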


class Hyp2F1Der(ScalarOp):
    """
    Derivatives of the Gaussian hypergeometric function ``2F1(a, b; c; z)``
    with respect to one of its first three parameters or to ``z``.

    """

    nin = 5

    def impl(self, a, b, c, z, wrt):
        # Numeric implementation: evaluate with SciPy rather than with the
        # symbolic `gamma`/`psi`/`hyp2f1` scalar Ops defined in this module.
        gamma = scipy.special.gamma
        psi = scipy.special.psi
        hyp2f1 = scipy.special.hyp2f1

        def _infinisum(f):
            """
            Utility function for infinite summations.

            Accumulates ``f(0) + f(1) + ...`` until adding the next term no
            longer changes the partial sum in floating point.
            """
            n, res = 0, f(0)
            while True:
                term = f(n + 1)
                if (res + term) - res == 0:
                    break
                n, res = n + 1, res + term
            return res

        def _hyp2f1_da(a, b, c, z):
            """
            Derivative of hyp2f1 wrt a
            """
            if abs(z) >= 1:
                raise NotImplementedError("Gradient not supported for |z| >= 1")

            # sum_k (a)_k (b)_k psi(a + k) z**k / ((c)_k k!), with the
            # Pochhammer symbol (a)_k written as gamma(a + k) / gamma(a).
            term1 = _infinisum(
                lambda k: (
                    (gamma(a + k) / gamma(a))
                    * (gamma(b + k) / gamma(b))
                    * psi(a + k)
                    * (z**k)
                )
                / ((gamma(c + k) / gamma(c)) * gamma(k + 1))
            )
            term2 = psi(a) * hyp2f1(a, b, c, z)

            return term1 - term2

        def _hyp2f1_db(a, b, c, z):
            """
            Derivative of hyp2f1 wrt b
            """
            if abs(z) >= 1:
                raise NotImplementedError("Gradient not supported for |z| >= 1")

            term1 = _infinisum(
                lambda k: (
                    (gamma(a + k) / gamma(a))
                    * (gamma(b + k) / gamma(b))
                    * psi(b + k)
                    * (z**k)
                )
                / ((gamma(c + k) / gamma(c)) * gamma(k + 1))
            )
            term2 = psi(b) * hyp2f1(a, b, c, z)

            return term1 - term2

        def _hyp2f1_dc(a, b, c, z):
            """
            Derivative of hyp2f1 wrt c
            """
            if abs(z) >= 1:
                raise NotImplementedError("Gradient not supported for |z| >= 1")

            term1 = psi(c) * hyp2f1(a, b, c, z)
            term2 = _infinisum(
                lambda k: (
                    (gamma(a + k) / gamma(a))
                    * (gamma(b + k) / gamma(b))
                    * psi(c + k)
                    * (z**k)
                )
                / ((gamma(c + k) / gamma(c)) * gamma(k + 1))
            )
            return term1 - term2

        def _hyp2f1_dz(a, b, c, z):
            """
            Derivative of hyp2f1 wrt z
            """
            return ((a * b) / c) * hyp2f1(a + 1, b + 1, c + 1, z)

        if wrt == 0:
            return _hyp2f1_da(a, b, c, z)
        elif wrt == 1:
            return _hyp2f1_db(a, b, c, z)
        elif wrt == 2:
            return _hyp2f1_dc(a, b, c, z)
        elif wrt == 3:
            return _hyp2f1_dz(a, b, c, z)
        else:
            raise ValueError(f"wrt must be 0, 1, 2 or 3, got {wrt}")

    def c_code(self, *args, **kwargs):
        raise NotImplementedError()


hyp2f1_der = Hyp2F1Der(upgrade_to_float, name="hyp2f1_der")
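
For reference, the series that `_hyp2f1_da` / `_hyp2f1_db` implement comes from term-by-term differentiation of the hypergeometric series; the code writes the Pochhammer symbol (a)_k as Γ(a+k)/Γ(a), the expression for ``b`` is analogous, and the overall sign flips for ``c``. The derivative with respect to ``z`` has a closed form:

```latex
\frac{\partial}{\partial a}\,{}_2F_1(a,b;c;z)
  = \sum_{k=0}^{\infty} \frac{(a)_k (b)_k}{(c)_k\,k!}\,\psi(a+k)\,z^k
  - \psi(a)\,{}_2F_1(a,b;c;z),
\qquad (a)_k = \frac{\Gamma(a+k)}{\Gamma(a)}
\\[4pt]
\frac{\partial}{\partial z}\,{}_2F_1(a,b;c;z) = \frac{ab}{c}\,{}_2F_1(a+1,\,b+1;\,c+1;\,z)
```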
5 changes: 5 additions & 0 deletions pytensor/tensor/inplace.py
@@ -392,6 +392,11 @@ def conj_inplace(a):
    """elementwise conjugate (inplace on `a`)"""


@scalar_elemwise
def hyp2f1_inplace(a, b, c, z):
    """Gaussian hypergeometric function (inplace on `a`)"""


pprint.assign(add_inplace, printing.OperatorPrinter("+=", -2, "either"))
pprint.assign(mul_inplace, printing.OperatorPrinter("*=", -1, "either"))
pprint.assign(sub_inplace, printing.OperatorPrinter("-=", -2, "left"))
12 changes: 12 additions & 0 deletions pytensor/tensor/math.py
@@ -1384,6 +1384,16 @@ def gammal(k, x):
    """Lower incomplete gamma function."""


@scalar_elemwise
def hyp2f1(a, b, c, z):
    """Gaussian hypergeometric function."""


@scalar_elemwise
def hyp2f1_der(a, b, c, z):
    """Derivatives for Gaussian hypergeometric function."""
Review comment (Member) on lines +1392 to +1394:

You don't need to provide this as an Elemwise. It's enough to implement the Scalar Op, which will be used in the gradient of hyp2f1. We don't expect users to use this directly.
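
A minimal sketch of what the reviewer means (hedged, not part of the diff; names follow the PR): the gradient of the ``hyp2f1`` Elemwise is built from the ``Hyp2F1`` scalar Op's ``grad``, and Elemwise lifts the scalar gradient graph elementwise on its own, so no user-facing ``at.hyp2f1_der`` wrapper is needed.

```python
# Hedged sketch: taking the gradient of the hyp2f1 Elemwise is enough to
# exercise the Hyp2F1Der scalar Op; Elemwise lifts scalar gradient graphs
# to elementwise graphs automatically.
import pytensor
import pytensor.tensor as at

a, b, c, z = at.dscalars("a", "b", "c", "z")
out = at.hyp2f1(a, b, c, z)
# This builds Elemwise{hyp2f1_der} nodes internally; no separate
# at.hyp2f1_der entry point is required for users.
grads = pytensor.grad(out, [a, b, c, z])
```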



@scalar_elemwise
def j0(x):
    """Bessel function of the first kind of order 0."""
@@ -3132,6 +3142,8 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None
"power",
"logaddexp",
"logsumexp",
"hyp2f1",
"hyp2f1_der",
]

DEPRECATED_NAMES = [
25 changes: 24 additions & 1 deletion pytensor/tensor/special.py
@@ -1,13 +1,18 @@
import warnings
from textwrap import dedent
from typing import TYPE_CHECKING

import numpy as np
import scipy

from pytensor.graph.basic import Apply
from pytensor.link.c.op import COp
from pytensor.tensor.basic import as_tensor_variable
from pytensor.tensor.math import neg, sum
from pytensor.tensor.math import neg, sum, gamma


if TYPE_CHECKING:
from pytensor.tensor import TensorLike, TensorVariable


class SoftmaxGrad(COp):
@@ -768,7 +773,25 @@ def log_softmax(c, axis=UNSET_AXIS):
    return LogSoftmax(axis=axis)(c)


def poch(z: "TensorLike", m: "TensorLike") -> "TensorVariable":
    """
    Pochhammer symbol (rising factorial) function.

    """
    return gamma(z + m) / gamma(z)


def factorial(n: "TensorLike") -> "TensorVariable":
    """
    Factorial function of a scalar or array of numbers.

    """
    return gamma(n + 1)


__all__ = [
    "softmax",
    "log_softmax",
    "poch",
    "factorial",
]
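
A small usage sketch for the two new helpers (hedged, not part of the diff; the values only check the gamma-function identities they are built on):

```python
# Hedged usage sketch: poch and factorial are thin wrappers around gamma,
# so (3)_2 = 3 * 4 = 12 and 5! = 120 serve as quick sanity checks.
import numpy as np
from pytensor import function
from pytensor.tensor import scalar
from pytensor.tensor.special import factorial, poch

z, m = scalar("z"), scalar("m")
np.testing.assert_allclose(function([z, m], poch(z, m))(3.0, 2.0), 12.0)

n = scalar("n")
np.testing.assert_allclose(function([n], factorial(n))(5.0), 120.0)
```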
68 changes: 68 additions & 0 deletions tests/tensor/test_math_scipy.py
@@ -71,6 +71,7 @@ def scipy_special_gammal(k, x):
expected_iv = scipy.special.iv
expected_erfcx = scipy.special.erfcx
expected_sigmoid = scipy.special.expit
expected_hyp2f1 = scipy.special.hyp2f1

TestErfBroadcast = makeBroadcastTester(
    op=at.erf,
@@ -753,6 +754,73 @@ def test_deprecated_module():
    inplace=True,
)

_good_broadcast_quaternary_hyp2f1 = dict(
    normal=(
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 0.5, (2, 3)),
    ),
)

_good_broadcast_pentanary_hyp2f1_der = dict(
    normal=(
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 1000, (2, 3)),
        random_ranged(0, 0.5, (2, 3)),
        integers_ranged(-1, 3, (2, 3)),
    ),
)
Review comment (Member) on lines +766 to +774:

Similarly, you don't need to test this one directly; the Scalar Op will be tested in the test of hyp2f1.
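
A hedged sketch of that suggestion (not part of the diff): checking the gradient of the ``hyp2f1`` Elemwise already pushes values through the ``Hyp2F1Der`` scalar Op, so a dedicated broadcast tester for ``at.hyp2f1_der`` is redundant.

```python
# Hedged sketch: verify_grad on the hyp2f1 Elemwise exercises Hyp2F1Der,
# because the gradient graph is built from that scalar Op.
import numpy as np
from pytensor.gradient import verify_grad
import pytensor.tensor as at

rng = np.random.default_rng(1234)
pt = [
    rng.uniform(0.5, 5.0, (2, 3)),  # a
    rng.uniform(0.5, 5.0, (2, 3)),  # b
    rng.uniform(0.5, 5.0, (2, 3)),  # c
    rng.uniform(0.0, 0.5, (2, 3)),  # z, kept inside |z| < 1
]
verify_grad(at.hyp2f1, pt, rng=rng)
```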


TestHyp2F1Broadcast = makeBroadcastTester(
    op=at.hyp2f1,
    expected=expected_hyp2f1,
    good=_good_broadcast_quaternary_hyp2f1,
    grad=_good_broadcast_quaternary_hyp2f1,
    eps=2e-10,
)

TestHyp2F1InplaceBroadcast = makeBroadcastTester(
    op=inplace.hyp2f1_inplace,
    expected=expected_hyp2f1,
    good=_good_broadcast_quaternary_hyp2f1,
    eps=2e-10,
    mode=mode_no_scipy,
    inplace=True,
)


TestHyp2F1DerBroadcast = makeBroadcastTester(
    op=at.hyp2f1_der,
    expected=expected_hyp2f1,
    good=_good_broadcast_pentanary_hyp2f1_der,
    eps=2e-10,
)

class TestBetaIncGrad:
    def test_stan_grad_partial(self):
41 changes: 34 additions & 7 deletions tests/tensor/test_special.py
@@ -1,18 +1,16 @@
import numpy as np
import pytest
from scipy.special import factorial as scipy_factorial
from scipy.special import log_softmax as scipy_log_softmax
from scipy.special import poch as scipy_poch
from scipy.special import softmax as scipy_softmax

from pytensor.compile.function import function
from pytensor.configdefaults import config
from pytensor.tensor.special import (
    LogSoftmax,
    Softmax,
    SoftmaxGrad,
    log_softmax,
    softmax,
)
from pytensor.tensor import scalar, scalars
from pytensor.tensor.special import LogSoftmax, Softmax, SoftmaxGrad, log_softmax, softmax, poch, factorial
from pytensor.tensor.type import matrix, tensor3, tensor4, vector
from tests.tensor.utils import random_ranged
from tests import unittest_tools as utt


Expand Down Expand Up @@ -140,3 +138,32 @@ def test_valid_axis(self):

with pytest.raises(ValueError):
SoftmaxGrad(-4)(*x)


@pytest.mark.parametrize(
    "z, m", [random_ranged(0, 5, (2,)), random_ranged(0, 5, (2,))]
)
def test_poch(z, m):
    _z, _m = scalars("z", "m")

    actual_fn = function([_z, _m], poch(_z, _m))
    actual = actual_fn(z, m)

    expected = scipy_poch(z, m)

    assert np.allclose(actual, expected)


@pytest.mark.parametrize("n", random_ranged(0, 5, (1,)))
def test_factorial(n):

_n = scalar("n")

actual_fn = function([_n], factorial(_n))
actual = actual_fn(n)

expected = scipy_factorial(n)

assert np.allclose(actual, expected)