Skip to content

Commit f52bbbe

Browse files
Add no cover for dpnp.is_cuda_backend()
1 parent 2675370 commit f52bbbe

8 files changed

+41
-41
lines changed

dpnp/dpnp_iface.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -712,7 +712,7 @@ def is_cuda_backend(obj=None):
712712
if (
713713
sycl_device is not None
714714
and sycl_device.backend == dpctl.backend_type.cuda
715-
):
715+
): # pragma: no cover
716716
return True
717717
return False
718718

dpnp/dpnp_iface_indexing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ def choose(x1, choices, out=None, mode="raise"):
138138
)
139139

140140
if x1_desc:
141-
if dpnp.is_cuda_backend(x1_desc.get_array()):
141+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
142142
raise NotImplementedError(
143143
"Running on CUDA is currently not supported"
144144
)

dpnp/dpnp_iface_libmath.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ def erf(in_array1):
8282
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
8383
)
8484
if x1_desc:
85-
if dpnp.is_cuda_backend(x1_desc.get_array()):
85+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
8686
raise NotImplementedError(
8787
"Running on CUDA is currently not supported"
8888
)

dpnp/dpnp_iface_mathematical.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2946,7 +2946,7 @@ def modf(x1, **kwargs):
29462946

29472947
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
29482948
if x1_desc:
2949-
if dpnp.is_cuda_backend(x1_desc.get_array()):
2949+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
29502950
raise NotImplementedError(
29512951
"Running on CUDA is currently not supported"
29522952
)

dpnp/dpnp_iface_sorting.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -215,7 +215,7 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):
215215

216216
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
217217
if x1_desc:
218-
if dpnp.is_cuda_backend(x1_desc.get_array()):
218+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
219219
raise NotImplementedError(
220220
"Running on CUDA is currently not supported"
221221
)

dpnp/linalg/dpnp_utils_linalg.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ def _batched_qr(a, mode="reduced"):
401401
# w/a to avoid race condition on CUDA during multiple runs
402402
# TODO: Remove it once the OneMath issue is resolved
403403
# https://github.com/uxlfoundation/oneMath/issues/626
404-
if dpnp.is_cuda_backend(a_sycl_queue):
404+
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
405405
ht_ev.wait()
406406
else:
407407
_manager.add_event_pair(ht_ev, geqrf_ev)
@@ -2479,7 +2479,7 @@ def dpnp_qr(a, mode="reduced"):
24792479
# w/a to avoid race condition on CUDA during multiple runs
24802480
# TODO: Remove it once the OneMath issue is resolved
24812481
# https://github.com/uxlfoundation/oneMath/issues/626
2482-
if dpnp.is_cuda_backend(a_sycl_queue):
2482+
if dpnp.is_cuda_backend(a_sycl_queue): # pragma: no cover
24832483
ht_ev.wait()
24842484
else:
24852485
_manager.add_event_pair(ht_ev, geqrf_ev)

dpnp/random/dpnp_iface_random.py

Lines changed: 31 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ def beta(a, b, size=None):
140140
"""
141141

142142
if not use_origin_backend(a):
143-
if dpnp.is_cuda_backend():
143+
if dpnp.is_cuda_backend(): # pragma: no cover
144144
raise NotImplementedError(
145145
"Running on CUDA is currently not supported"
146146
)
@@ -191,7 +191,7 @@ def binomial(n, p, size=None):
191191
"""
192192

193193
if not use_origin_backend(n):
194-
if dpnp.is_cuda_backend():
194+
if dpnp.is_cuda_backend(): # pragma: no cover
195195
raise NotImplementedError(
196196
"Running on CUDA is currently not supported"
197197
)
@@ -248,7 +248,7 @@ def chisquare(df, size=None):
248248
"""
249249

250250
if not use_origin_backend(df):
251-
if dpnp.is_cuda_backend():
251+
if dpnp.is_cuda_backend(): # pragma: no cover
252252
raise NotImplementedError(
253253
"Running on CUDA is currently not supported"
254254
)
@@ -321,7 +321,7 @@ def exponential(scale=1.0, size=None):
321321
"""
322322

323323
if not use_origin_backend(scale):
324-
if dpnp.is_cuda_backend():
324+
if dpnp.is_cuda_backend(): # pragma: no cover
325325
raise NotImplementedError(
326326
"Running on CUDA is currently not supported"
327327
)
@@ -358,7 +358,7 @@ def f(dfnum, dfden, size=None):
358358
"""
359359

360360
if not use_origin_backend(dfnum):
361-
if dpnp.is_cuda_backend():
361+
if dpnp.is_cuda_backend(): # pragma: no cover
362362
raise NotImplementedError(
363363
"Running on CUDA is currently not supported"
364364
)
@@ -401,7 +401,7 @@ def gamma(shape, scale=1.0, size=None):
401401
"""
402402

403403
if not use_origin_backend(scale):
404-
if dpnp.is_cuda_backend():
404+
if dpnp.is_cuda_backend(): # pragma: no cover
405405
raise NotImplementedError(
406406
"Running on CUDA is currently not supported"
407407
)
@@ -444,7 +444,7 @@ def geometric(p, size=None):
444444
"""
445445

446446
if not use_origin_backend(p):
447-
if dpnp.is_cuda_backend():
447+
if dpnp.is_cuda_backend(): # pragma: no cover
448448
raise NotImplementedError(
449449
"Running on CUDA is currently not supported"
450450
)
@@ -483,7 +483,7 @@ def gumbel(loc=0.0, scale=1.0, size=None):
483483
"""
484484

485485
if not use_origin_backend(loc):
486-
if dpnp.is_cuda_backend():
486+
if dpnp.is_cuda_backend(): # pragma: no cover
487487
raise NotImplementedError(
488488
"Running on CUDA is currently not supported"
489489
)
@@ -526,7 +526,7 @@ def hypergeometric(ngood, nbad, nsample, size=None):
526526
"""
527527

528528
if not use_origin_backend(ngood):
529-
if dpnp.is_cuda_backend():
529+
if dpnp.is_cuda_backend(): # pragma: no cover
530530
raise NotImplementedError(
531531
"Running on CUDA is currently not supported"
532532
)
@@ -579,7 +579,7 @@ def laplace(loc=0.0, scale=1.0, size=None):
579579
"""
580580

581581
if not use_origin_backend(loc):
582-
if dpnp.is_cuda_backend():
582+
if dpnp.is_cuda_backend(): # pragma: no cover
583583
raise NotImplementedError(
584584
"Running on CUDA is currently not supported"
585585
)
@@ -618,7 +618,7 @@ def logistic(loc=0.0, scale=1.0, size=None):
618618
"""
619619

620620
if not use_origin_backend(loc):
621-
if dpnp.is_cuda_backend():
621+
if dpnp.is_cuda_backend(): # pragma: no cover
622622
raise NotImplementedError(
623623
"Running on CUDA is currently not supported"
624624
)
@@ -664,7 +664,7 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
664664
"""
665665

666666
if not use_origin_backend(mean):
667-
if dpnp.is_cuda_backend():
667+
if dpnp.is_cuda_backend(): # pragma: no cover
668668
raise NotImplementedError(
669669
"Running on CUDA is currently not supported"
670670
)
@@ -726,7 +726,7 @@ def multinomial(n, pvals, size=None):
726726
pvals_sum = sum(pvals)
727727
pvals_desc = dpnp.get_dpnp_descriptor(dpnp.array(pvals))
728728
d = len(pvals)
729-
if dpnp.is_cuda_backend(pvals_desc.get_array()):
729+
if dpnp.is_cuda_backend(pvals_desc.get_array()): # pragma: no cover
730730
raise NotImplementedError(
731731
"Running on CUDA is currently not supported"
732732
)
@@ -780,7 +780,7 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
780780
cov_ = dpnp.get_dpnp_descriptor(dpnp.array(cov, dtype=dpnp.float64))
781781
if dpnp.is_cuda_backend(mean_.get_array()) or dpnp.is_cuda_backend(
782782
cov_.get_array()
783-
):
783+
): # pragma: no cover
784784
raise NotImplementedError(
785785
"Running on CUDA is currently not supported"
786786
)
@@ -839,7 +839,7 @@ def negative_binomial(n, p, size=None):
839839
"""
840840

841841
if not use_origin_backend(n):
842-
if dpnp.is_cuda_backend():
842+
if dpnp.is_cuda_backend(): # pragma: no cover
843843
raise NotImplementedError(
844844
"Running on CUDA is currently not supported"
845845
)
@@ -929,7 +929,7 @@ def noncentral_chisquare(df, nonc, size=None):
929929
"""
930930

931931
if not use_origin_backend(df):
932-
if dpnp.is_cuda_backend():
932+
if dpnp.is_cuda_backend(): # pragma: no cover
933933
raise NotImplementedError(
934934
"Running on CUDA is currently not supported"
935935
)
@@ -988,7 +988,7 @@ def pareto(a, size=None):
988988
"""
989989

990990
if not use_origin_backend(a):
991-
if dpnp.is_cuda_backend():
991+
if dpnp.is_cuda_backend(): # pragma: no cover
992992
raise NotImplementedError(
993993
"Running on CUDA is currently not supported"
994994
)
@@ -1062,7 +1062,7 @@ def poisson(lam=1.0, size=None):
10621062
"""
10631063

10641064
if not use_origin_backend(lam):
1065-
if dpnp.is_cuda_backend():
1065+
if dpnp.is_cuda_backend(): # pragma: no cover
10661066
raise NotImplementedError(
10671067
"Running on CUDA is currently not supported"
10681068
)
@@ -1102,7 +1102,7 @@ def power(a, size=None):
11021102
"""
11031103

11041104
if not use_origin_backend(a):
1105-
if dpnp.is_cuda_backend():
1105+
if dpnp.is_cuda_backend(): # pragma: no cover
11061106
raise NotImplementedError(
11071107
"Running on CUDA is currently not supported"
11081108
)
@@ -1524,7 +1524,7 @@ def rayleigh(scale=1.0, size=None):
15241524
"""
15251525

15261526
if not use_origin_backend(scale):
1527-
if dpnp.is_cuda_backend():
1527+
if dpnp.is_cuda_backend(): # pragma: no cover
15281528
raise NotImplementedError(
15291529
"Running on CUDA is currently not supported"
15301530
)
@@ -1606,7 +1606,7 @@ def shuffle(x1):
16061606
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False)
16071607
if x1_desc:
16081608

1609-
if dpnp.is_cuda_backend(x1_desc.get_array()):
1609+
if dpnp.is_cuda_backend(x1_desc.get_array()): # pragma: no cover
16101610
raise NotImplementedError(
16111611
"Running on CUDA is currently not supported"
16121612
)
@@ -1655,7 +1655,7 @@ def seed(seed=None, device=None, sycl_queue=None):
16551655
)
16561656

16571657
if not use_origin_backend(seed):
1658-
if dpnp.is_cuda_backend():
1658+
if dpnp.is_cuda_backend(): # pragma: no cover
16591659
raise NotImplementedError(
16601660
"Running on CUDA is currently not supported"
16611661
)
@@ -1700,7 +1700,7 @@ def standard_cauchy(size=None):
17001700
"""
17011701

17021702
if not use_origin_backend(size):
1703-
if dpnp.is_cuda_backend():
1703+
if dpnp.is_cuda_backend(): # pragma: no cover
17041704
raise NotImplementedError(
17051705
"Running on CUDA is currently not supported"
17061706
)
@@ -1729,7 +1729,7 @@ def standard_exponential(size=None):
17291729
"""
17301730

17311731
if not use_origin_backend(size):
1732-
if dpnp.is_cuda_backend():
1732+
if dpnp.is_cuda_backend(): # pragma: no cover
17331733
raise NotImplementedError(
17341734
"Running on CUDA is currently not supported"
17351735
)
@@ -1761,7 +1761,7 @@ def standard_gamma(shape, size=None):
17611761
"""
17621762

17631763
if not use_origin_backend(shape):
1764-
if dpnp.is_cuda_backend():
1764+
if dpnp.is_cuda_backend(): # pragma: no cover
17651765
raise NotImplementedError(
17661766
"Running on CUDA is currently not supported"
17671767
)
@@ -1844,7 +1844,7 @@ def standard_t(df, size=None):
18441844
"""
18451845

18461846
if not use_origin_backend(df):
1847-
if dpnp.is_cuda_backend():
1847+
if dpnp.is_cuda_backend(): # pragma: no cover
18481848
raise NotImplementedError(
18491849
"Running on CUDA is currently not supported"
18501850
)
@@ -1885,7 +1885,7 @@ def triangular(left, mode, right, size=None):
18851885
"""
18861886

18871887
if not use_origin_backend(left):
1888-
if dpnp.is_cuda_backend():
1888+
if dpnp.is_cuda_backend(): # pragma: no cover
18891889
raise NotImplementedError(
18901890
"Running on CUDA is currently not supported"
18911891
)
@@ -1998,7 +1998,7 @@ def vonmises(mu, kappa, size=None):
19981998
"""
19991999

20002000
if not use_origin_backend(mu):
2001-
if dpnp.is_cuda_backend():
2001+
if dpnp.is_cuda_backend(): # pragma: no cover
20022002
raise NotImplementedError(
20032003
"Running on CUDA is currently not supported"
20042004
)
@@ -2039,7 +2039,7 @@ def wald(mean, scale, size=None):
20392039
"""
20402040

20412041
if not use_origin_backend(mean):
2042-
if dpnp.is_cuda_backend():
2042+
if dpnp.is_cuda_backend(): # pragma: no cover
20432043
raise NotImplementedError(
20442044
"Running on CUDA is currently not supported"
20452045
)
@@ -2080,7 +2080,7 @@ def weibull(a, size=None):
20802080
"""
20812081

20822082
if not use_origin_backend(a):
2083-
if dpnp.is_cuda_backend():
2083+
if dpnp.is_cuda_backend(): # pragma: no cover
20842084
raise NotImplementedError(
20852085
"Running on CUDA is currently not supported"
20862086
)
@@ -2117,7 +2117,7 @@ def zipf(a, size=None):
21172117
"""
21182118

21192119
if not use_origin_backend(a):
2120-
if dpnp.is_cuda_backend():
2120+
if dpnp.is_cuda_backend(): # pragma: no cover
21212121
raise NotImplementedError(
21222122
"Running on CUDA is currently not supported"
21232123
)

dpnp/random/dpnp_random_state.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ def normal(
235235
"""
236236

237237
if not use_origin_backend():
238-
if dpnp.is_cuda_backend(self):
238+
if dpnp.is_cuda_backend(self): # pragma: no cover
239239
raise NotImplementedError(
240240
"Running on CUDA is currently not supported"
241241
)
@@ -369,7 +369,7 @@ def randint(self, low, high=None, size=None, dtype=int, usm_type="device"):
369369
"""
370370

371371
if not use_origin_backend(low):
372-
if dpnp.is_cuda_backend(self):
372+
if dpnp.is_cuda_backend(self): # pragma: no cover
373373
raise NotImplementedError(
374374
"Running on CUDA is currently not supported"
375375
)
@@ -598,7 +598,7 @@ def uniform(
598598
"""
599599

600600
if not use_origin_backend():
601-
if dpnp.is_cuda_backend(self):
601+
if dpnp.is_cuda_backend(self): # pragma: no cover
602602
raise NotImplementedError(
603603
"Running on CUDA is currently not supported"
604604
)

0 commit comments

Comments
 (0)