Skip to content

Commit 094ab7c

Browse files
Exclude code from coverage (#2272)
This PR continues #2268 and suggests excluding some lines of code from coverage using `pragma: no cover` to increase the coverage statistic
1 parent 5649b06 commit 094ab7c

13 files changed

+48
-49
lines changed

dpnp/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
# are not installed under any of default paths where Python is searching.
4343
from platform import system
4444

45-
if system() == "Windows":
45+
if system() == "Windows": # pragma: no cover
4646
if hasattr(os, "add_dll_directory"):
4747
os.add_dll_directory(mypath)
4848
os.add_dll_directory(dpctlpath)

dpnp/dpnp_iface.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -712,7 +712,7 @@ def is_cuda_backend(obj=None):
712712
if (
713713
sycl_device is not None
714714
and sycl_device.backend == dpctl.backend_type.cuda
715-
):
715+
): # pragma: no cover
716716
return True
717717
return False
718718

dpnp/dpnp_iface_histograms.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ def _align_dtypes(a_dtype, bins_dtype, ntype, supported_types, device):
9191
return sample_type, hist_type
9292

9393
# should not happen
94-
return None, None
94+
return None, None # pragma: no cover
9595

9696

9797
def _ravel_check_a_and_weights(a, weights):
@@ -392,7 +392,7 @@ def bincount(x, weights=None, minlength=None):
392392
x.dtype, x.dtype, ntype, supported_types, device
393393
)
394394

395-
if x_casted_dtype is None or ntype_casted is None:
395+
if x_casted_dtype is None or ntype_casted is None: # pragma: no cover
396396
raise ValueError(
397397
f"function '{bincount}' does not support input types "
398398
f"({x.dtype}, {ntype}), "
@@ -607,7 +607,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None):
607607
a.dtype, bin_edges.dtype, ntype, supported_types, device
608608
)
609609

610-
if a_bin_dtype is None or hist_dtype is None:
610+
if a_bin_dtype is None or hist_dtype is None: # pragma: no cover
611611
raise ValueError(
612612
f"function '{histogram}' does not support input types "
613613
f"({a.dtype}, {bin_edges.dtype}, {ntype}), "

dpnp/dpnp_iface_indexing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ def choose(x1, choices, out=None, mode="raise"):
138138
)
139139

140140
if x1_desc:
141-
if dpnp.is_cuda_backend(x1_desc.get_array()):
141+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
142142
raise NotImplementedError(
143143
"Running on CUDA is currently not supported"
144144
)

dpnp/dpnp_iface_libmath.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ def erf(in_array1):
8282
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
8383
)
8484
if x1_desc:
85-
if dpnp.is_cuda_backend(x1_desc.get_array()):
85+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
8686
raise NotImplementedError(
8787
"Running on CUDA is currently not supported"
8888
)

dpnp/dpnp_iface_mathematical.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2946,7 +2946,7 @@ def modf(x1, **kwargs):
29462946

29472947
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
29482948
if x1_desc:
2949-
if dpnp.is_cuda_backend(x1_desc.get_array()):
2949+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
29502950
raise NotImplementedError(
29512951
"Running on CUDA is currently not supported"
29522952
)

dpnp/dpnp_iface_sorting.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -215,7 +215,7 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
215215

216216
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
217217
if x1_desc:
218-
if dpnp.is_cuda_backend(x1_desc.get_array()):
218+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
219219
raise NotImplementedError(
220220
"Running on CUDA is currently not supported"
221221
)

dpnp/dpnp_iface_statistics.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ def _count_reduce_items(arr, axis, where=True):
108108
for ax in axis:
109109
items *= arr.shape[normalize_axis_index(ax, arr.ndim)]
110110
items = dpnp.intp(items)
111-
else:
111+
else: # pragma: no cover
112112
raise NotImplementedError(
113113
"where keyword argument is only supported with its default value."
114114
)
@@ -576,7 +576,7 @@ def correlate(a, v, mode="valid"):
576576
rdtype = result_type_for_device([a.dtype, v.dtype], device)
577577
supported_dtype = to_supported_dtypes(rdtype, supported_types, device)
578578

579-
if supported_dtype is None:
579+
if supported_dtype is None: # pragma: no cover
580580
raise ValueError(
581581
f"function does not support input types "
582582
f"({a.dtype.name}, {v.dtype.name}), "

dpnp/dpnp_utils/dpnp_utils_common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,4 +78,4 @@ def is_castable(dtype, stype):
7878
):
7979
return stypes
8080

81-
return None
81+
return None # pragma: no cover

dpnp/linalg/dpnp_utils_linalg.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ def _batched_qr(a, mode="reduced"):
401401
# w/a to avoid race condition on CUDA during multiple runs
402402
# TODO: Remove it once the OneMath issue is resolved
403403
# https://github.com/uxlfoundation/oneMath/issues/626
404-
if dpnp.is_cuda_backend(a_sycl_queue):
404+
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
405405
ht_ev.wait()
406406
else:
407407
_manager.add_event_pair(ht_ev, geqrf_ev)
@@ -2479,7 +2479,7 @@ def dpnp_qr(a, mode="reduced"):
24792479
# w/a to avoid race condition on CUDA during multiple runs
24802480
# TODO: Remove it once the OneMath issue is resolved
24812481
# https://github.com/uxlfoundation/oneMath/issues/626
2482-
if dpnp.is_cuda_backend(a_sycl_queue):
2482+
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
24832483
ht_ev.wait()
24842484
else:
24852485
_manager.add_event_pair(ht_ev, geqrf_ev)

0 commit comments

Comments
 (0)