ENH: add a naive divmod, un-xfail relevant tests #84

Closed · wants to merge 2 commits
55 changes: 55 additions & 0 deletions torch_np/_binary_ufuncs.py
@@ -57,3 +57,58 @@ def wrapped(
decorated.__qualname__ = name # XXX: is this really correct?
decorated.__name__ = name
vars()[name] = decorated


# A stub implementation of divmod; should be improved once
# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch.
#
# Implementation detail: we just call the two ufuncs created just above,
# for x1 // x2 and x1 % x2.
# This means x1 and x2 are normalized in each of the two ufuncs --- note that
# there is no @normalizer on divmod itself.


def divmod(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok: SubokLike = False,
signature=None,
extobj=None,
):
out1, out2 = None, None
if out is not None:
out1, out2 = out

kwds = dict(
where=where,
casting=casting,
order=order,
dtype=dtype,
subok=subok,
signature=signature,
extobj=extobj,
)

    # NB: use local names for the intermediate results
quot = floor_divide(x1, x2, out=out1, **kwds)
rem = remainder(x1, x2, out=out2, **kwds)
Comment on lines +100 to +101

Collaborator:

Note that here (and in general) if we want these functions to be differentiable in PyTorch, we should not use their out= variant. We should implement the out= behaviour manually.

Collaborator (author):

Here these are the wrapped floor_divide and remainder, so torch.remainder and torch.floor_divide never see the out=.
Also NB: this will need a rework anyway; apparently there are more (out1, out2) ufuncs, ldexp and frexp. So a Tuple[NDArray] return annotation will need to appear after we settle on the generic machinery.

Collaborator:

Right! And sure, let's just push this discussion then. I still think that the out= machinery can be implemented in a generic way.
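For context, a minimal sketch of what implementing the out= behaviour manually could look like (illustrative only, not part of this diff; the .tensor accessor mirrors the FIXME below):

import torch

def _copy_into_out(result: torch.Tensor, out):
    # Compute functionally first, then copy into the user-supplied buffer.
    # The torch op itself never sees out=, so autograd records the full
    # graph; Tensor.copy_ is differentiable with respect to its source.
    if out is None:
        return result
    out.tensor.copy_(result)  # hypothetical accessor on the wrapper ndarray
    return out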


quot = _helpers.result_or_out(quot.get(), out1) # FIXME: .get() -> .tensor
rem = _helpers.result_or_out(rem.get(), out2)

return quot, rem


def modf(x, /, *args, **kwds):
quot, rem = divmod(x, 1, *args, **kwds)
return rem, quot


__all__ = __all__ + ["divmod", "modf"]
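For reference, the intended behaviour of the two new functions (a hypothetical session, assuming the package imports as torch_np; values follow NumPy's floor-division convention):

import torch_np as np

q, r = np.divmod(np.array([7.0, -7.0]), 3.0)
# q -> [ 2., -3.]   (x1 // x2, floor division)
# r -> [ 1.,  2.]   (x1 % x2, remainder takes the sign of the divisor)

frac, whole = np.modf(np.array([2.5]))
# frac -> [0.5], whole -> [2.]
# NB: because modf routes through the floor-based divmod, negative inputs
# may differ from NumPy's modf, which truncates toward zero.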
2 changes: 2 additions & 0 deletions torch_np/_ndarray.py
@@ -272,6 +272,8 @@ def __rfloordiv__(self, other):
def __ifloordiv__(self, other):
return _binary_ufuncs.floor_divide(self, other, out=self)

__divmod__ = _binary_ufuncs.divmod

# power, self**exponent
__pow__ = __rpow__ = _binary_ufuncs.float_power

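With __divmod__ wired up, Python's builtin divmod should now dispatch to the ufunc for array operands (hypothetical usage; note that the hunk adds __divmod__ only, so the reflected form divmod(scalar, array) would still need an __rdivmod__, which is not defined here):

a = np.array([7, -7])
q, r = divmod(a, 3)   # dispatches to ndarray.__divmod__ -> _binary_ufuncs.divmod
# q -> array([ 2, -3]), r -> array([1, 2])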
79 changes: 49 additions & 30 deletions torch_np/_normalizations.py
@@ -1,12 +1,14 @@
""" "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
"""
import functools
import inspect
import operator
import typing
from typing import Optional, Sequence, Union

import torch

from . import _helpers
from . import _dtypes, _helpers

ArrayLike = typing.TypeVar("ArrayLike")
DTypeLike = typing.TypeVar("DTypeLike")
@@ -22,10 +24,6 @@
NDArrayOrSequence = Union[NDArray, Sequence[NDArray]]
OutArray = typing.TypeVar("OutArray")

import inspect

from . import _dtypes


def normalize_array_like(x, name=None):
(tensor,) = _helpers.to_tensors(x)
@@ -52,7 +50,7 @@ def normalize_dtype(dtype, name=None):
return torch_dtype


def normalize_subok_like(arg, name):
def normalize_subok_like(arg, name="subok"):
if arg:
raise ValueError(f"'{name}' parameter is not supported.")

@@ -87,7 +85,6 @@ def normalize_ndarray(arg, name=None):
AxisLike: normalize_axis_like,
}

import functools

_sentinel = object()

@@ -97,7 +94,7 @@ def normalize_this(arg, parm, return_on_failure=_sentinel):
normalizer = normalizers.get(parm.annotation, None)
if normalizer:
try:
return normalizer(arg)
return normalizer(arg, parm.name)
except Exception as exc:
if return_on_failure is not _sentinel:
return return_on_failure
@@ -108,6 +105,44 @@ def normalize_this(arg, parm, return_on_failure=_sentinel):
return arg
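A small aside on the normalizer(arg, parm.name) change above: threading the parameter name through lets individual normalizers raise errors that identify the offending argument. With normalize_subok_like as defined earlier, one would expect (hypothetical session):

normalize_subok_like(True, "subok")
# ValueError: 'subok' parameter is not supported.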


# postprocess return values


def postprocess_ndarray(result, **kwds):
return _helpers.array_from(result)


def postprocess_out(result, **kwds):
result, out = result
return _helpers.result_or_out(result, out, **kwds)


def postprocess_tuple(result, **kwds):
return _helpers.tuple_arrays_from(result)


def postprocess_list(result, **kwds):
return list(_helpers.tuple_arrays_from(result))


def postprocess_variadic(result, **kwds):
# a variadic return: a single NDArray or tuple/list of NDArrays, e.g. atleast_1d
if isinstance(result, (tuple, list)):
seq = type(result)
return seq(_helpers.tuple_arrays_from(result))
else:
return _helpers.array_from(result)


postprocessors = {
NDArray: postprocess_ndarray,
OutArray: postprocess_out,
NDArrayOrSequence: postprocess_variadic,
tuple[NDArray]: postprocess_tuple,
list[NDArray]: postprocess_list,
}


Comment on lines +108 to +145

Collaborator:

This is duplicated from the other PR.

def normalizer(_func=None, *, return_on_failure=_sentinel, promote_scalar_out=False):
def normalizer_inner(func):
@functools.wraps(func)
@@ -154,33 +189,17 @@ def wrapped(*args, **kwds):
raise TypeError(
f"{func.__name__}() takes {len(ba.args)} positional argument but {len(args)} were given."
)

# finally, pass normalized arguments through
result = func(*ba.args, **ba.kwargs)

# handle returns
r = sig.return_annotation
if r == NDArray:
return _helpers.array_from(result)
elif r == inspect._empty:
return result
elif hasattr(r, "__origin__") and r.__origin__ in (list, tuple):
# this is tuple[NDArray] or list[NDArray]
# XXX: change to separate tuple and list normalizers?
return r.__origin__(_helpers.tuple_arrays_from(result))
elif r == NDArrayOrSequence:
# a variadic return: a single NDArray or tuple/list of NDArrays, e.g. atleast_1d
if isinstance(result, (tuple, list)):
seq = type(result)
return seq(_helpers.tuple_arrays_from(result))
else:
return _helpers.array_from(result)
elif r == OutArray:
result, out = result
return _helpers.result_or_out(
result, out, promote_scalar=promote_scalar_out
)
else:
raise ValueError(f"Unknown return annotation {return_annotation}")
postprocess = postprocessors.get(r, None)
if postprocess:
kwds = {"promote_scalar": promote_scalar_out}
result = postprocess(result, **kwds)
return result
Comment on lines +198 to +202

Collaborator:

Also dupped?


return wrapped

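To illustrate how the normalization and postprocessing halves fit together, a hypothetical ufunc defined against this machinery (hypot is only an example name; any torch binary op would look the same):

import torch

@normalizer
def hypot(x1: ArrayLike, x2: ArrayLike) -> NDArray:
    # ArrayLike arguments arrive normalized to torch.Tensors; the NDArray
    # return annotation routes the raw tensor through postprocess_ndarray,
    # which wraps it back into a torch_np ndarray.
    return torch.hypot(x1, x2)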
77 changes: 35 additions & 42 deletions torch_np/tests/numpy_tests/core/test_scalarmath.py
@@ -262,10 +262,6 @@ class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:

if op == divmod:
pytest.xfail(reason="__divmod__ not implemented")

for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
@@ -279,7 +275,7 @@ def test_modulus_basic(self):
else:
assert_(b > rem >= 0, msg)

@pytest.mark.xfail(reason='divmod not implemented')
@pytest.mark.slow
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
@@ -311,10 +307,6 @@ def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:

if op == divmod:
pytest.xfail(reason="__divmod__ not implemented")

for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
@@ -329,41 +321,42 @@
else:
assert_(b > rem >= 0, msg)

@pytest.mark.skip(reason='float16 on cpu is incomplete in pytorch')
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
@pytest.mark.parametrize('dt', np.typecodes['Float'])
def test_float_modulus_corner_cases(self, dt):
if dt == 'e':
pytest.xfail(reason="RuntimeError: 'nextafter_cpu' not implemented for 'Half'")

b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)

# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
assert_(np.isinf(div)) and assert_(np.isnan(mod))
# with suppress_warnings() as sup:
# sup.filter(RuntimeWarning, "invalid value encountered in remainder")
# sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
# sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
# sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
# sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
                assert_(np.isinf(div))
                assert_(np.isnan(mod))


class TestComplexDivision: