Backport gh-2032 #2038


Merged 2 commits on Sep 14, 2024
3 changes: 1 addition & 2 deletions dpnp/dpnp_iface_mathematical.py
@@ -224,8 +224,7 @@ def _gradient_num_diff_2nd_order_interior(
# fix the shape for broadcasting
shape = [1] * ndim
shape[axis] = -1
# TODO: use shape.setter once dpctl#1699 is resolved
# a.shape = b.shape = c.shape = shape

a = a.reshape(shape)
b = b.reshape(shape)
c = c.reshape(shape)
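For context on the hunk above: the helper builds a shape of all ones with -1 on the differentiation axis so the 1-D coefficient arrays broadcast against the N-D operand, and it keeps the reshape(shape) calls because (per the skip reason further down in this diff) the dpctl shape setter does not accept a -1 entry. A minimal NumPy-only sketch of the same pattern; the helper name _broadcast_along_axis is illustrative, not part of dpnp:

import numpy as np

def _broadcast_along_axis(coeff, ndim, axis):
    # shape of all ones, with -1 on `axis` so reshape infers its length
    shape = [1] * ndim
    shape[axis] = -1
    return coeff.reshape(shape)

a = np.array([0.5, 0.25, 0.125])
print(_broadcast_along_axis(a, ndim=3, axis=1).shape)  # (1, 3, 1)
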
124 changes: 124 additions & 0 deletions tests/third_party/cupy/core_tests/test_core.py
@@ -0,0 +1,124 @@
import sys
import unittest

import numpy
import pytest

import dpnp as cupy
from tests.third_party.cupy import testing


class TestSize(unittest.TestCase):
# def tearDown(self):
# # Free huge memory for slow test
# cupy.get_default_memory_pool().free_all_blocks()

@testing.for_all_dtypes()
@testing.numpy_cupy_equal()
def test_size(self, xp, dtype):
a = xp.ndarray((2, 3), dtype=dtype)
return xp.size(a)

@testing.for_all_dtypes()
@testing.numpy_cupy_equal()
def test_size_axis(self, xp, dtype):
a = xp.ndarray((2, 3), dtype=dtype)
return xp.size(a, axis=1)

@testing.for_all_dtypes()
def test_size_axis_error(self, dtype):
for xp in (numpy, cupy):
a = xp.ndarray((2, 3), dtype=dtype)
with pytest.raises(IndexError):
return xp.size(a, axis=3)

@testing.numpy_cupy_equal()
@testing.slow
def test_size_huge(self, xp):
a = xp.ndarray(2**32, "b") # 4 GiB
return xp.size(a)


_orders = {
order_arg: order_expect
for order_expect, order_args in [
("C", ["C", "c", "CONTIGUOUS", "", None]),
("F", ["F", "f", "FORTRAN"]),
]
for order_arg in order_args
}


@pytest.mark.skip("no cupy._core submodule")
class TestOrder(unittest.TestCase):
@testing.for_orders(_orders.keys())
def test_ndarray(self, order):
order_expect = _orders[order]
a = core.ndarray((2, 3), order=order)
expect_c = order_expect == "C"
expect_f = order_expect == "F"
assert a.flags.c_contiguous == expect_c
assert a.flags.f_contiguous == expect_f


@pytest.mark.skip("min_scalar_type() is not supported")
class TestMinScalarType:
def test_scalar(self):
for v in (-129, -128, 0, 1.2, numpy.inf):
assert cupy.min_scalar_type(v) is numpy.min_scalar_type(v)

@testing.for_all_dtypes()
def test_numpy_scalar(self, dtype):
sc = dtype(1)
for v in (sc, [sc, sc]):
assert cupy.min_scalar_type(v) is numpy.min_scalar_type(v)

@testing.for_all_dtypes()
def test_cupy_scalar(self, dtype):
sc = cupy.array(-1).astype(dtype)
for v in (sc, [sc, sc]):
assert cupy.min_scalar_type(v) is sc.dtype

@testing.for_all_dtypes()
def test_numpy_ndarray(self, dtype):
arr = numpy.array([[-1, 1]]).astype(dtype)
for v in (arr, (arr, arr)):
assert cupy.min_scalar_type(v) is numpy.min_scalar_type(v)

@testing.for_all_dtypes()
def test_cupy_ndarray(self, dtype):
arr = cupy.array([[-1, 1]]).astype(dtype)
for v in (arr, (arr, arr)):
assert cupy.min_scalar_type(v) is arr.dtype


@testing.parameterize(
*testing.product(
{
"cxx": (None, "--std=c++11"),
}
)
)
@pytest.mark.skip("compiling cupy headers are not supported")
class TestCuPyHeaders(unittest.TestCase):
def setUp(self):
self.temporary_cache_dir_context = test_raw.use_temporary_cache_dir()
self.cache_dir = self.temporary_cache_dir_context.__enter__()
self.header = "\n".join(
["#include <" + h + ">" for h in core._cupy_header_list]
)

def tearDown(self):
self.temporary_cache_dir_context.__exit__(*sys.exc_info())

def test_compiling_core_header(self):
code = r"""
extern "C" __global__ void _test_ker_() { }
"""
code = self.header + code
options = () if self.cxx is None else (self.cxx,)
ker = cupy.RawKernel(
code, "_test_ker_", options=options, backend="nvrtc"
)
ker((1,), (1,), ())
cupy.cuda.Device().synchronize()
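
The new TestSize cases above exercise xp.size() for both NumPy and dpnp, with and without an axis argument; the companion change below removes the "size() is not supported" skip in test_ndarray.py. A short usage sketch of what the tests verify (a hedged example, assuming a dpnp build that already contains the backported size()/axis support):

import numpy
import dpnp

x = dpnp.zeros((2, 3), dtype=dpnp.int8)
y = numpy.zeros((2, 3), dtype=numpy.int8)

# total number of elements, and extent along one axis
print(dpnp.size(x))          # 6
print(dpnp.size(x, axis=1))  # 3

# the tests above assert these match numpy.size
assert dpnp.size(x) == numpy.size(y)
assert dpnp.size(x, axis=1) == numpy.size(y, axis=1)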
13 changes: 10 additions & 3 deletions tests/third_party/cupy/core_tests/test_ndarray.py
@@ -154,11 +154,18 @@ def test_unsupported_type(self):
with pytest.raises(TypeError):
cupy.array(arr)

@pytest.mark.skip("no ndim limit")
@testing.with_requires("numpy>=2.0")
@testing.numpy_cupy_array_equal()
def test_upper_limit_ndim(self, xp):
shape = [1 for i in range(64)]
return xp.zeros(shape, dtype=xp.int8)

@pytest.mark.skip("no ndim limit")
def test_excessive_ndim(self):
for xp in (numpy, cupy):
with pytest.raises(ValueError):
xp.ndarray(shape=[1 for i in range(33)], dtype=xp.int8)
xp.ndarray(shape=[1 for i in range(65)], dtype=xp.int8)


@testing.parameterize(
@@ -265,7 +272,8 @@ def test_shape_set(self, xp):
return xp.array(arr.shape)

@pytest.mark.skip(
"dpctl-1699: shape setter does not work with negative shape"
"dpctl-1699: shape setter does not work with negative shape "
"(no plan to support that)"
)
@testing.numpy_cupy_array_equal()
def test_shape_set_infer(self, xp):
@@ -588,7 +596,6 @@ def test_output_type_mismatch(self):
wrap_take(a, i)


@pytest.mark.skip("size() is not supported")
class TestSize(unittest.TestCase):
@testing.numpy_cupy_equal()
def test_size_without_axis(self, xp):
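The skipped ndim-limit tests in the test_ndarray.py hunk above target NumPy 2.0 behaviour, where arrays may have at most 64 dimensions, while dpnp (per the skip reason "no ndim limit") imposes no such cap. A small sketch of the NumPy side only, assuming NumPy >= 2.0:

import numpy

ok = numpy.zeros([1] * 64, dtype=numpy.int8)
print(ok.ndim)  # 64 dimensions are accepted

try:
    numpy.zeros([1] * 65, dtype=numpy.int8)
except ValueError as exc:
    # a 65-dimensional shape is rejected
    print("rejected:", exc)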