@@ -140,7 +140,7 @@ def beta(a, b, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -191,7 +191,7 @@ def binomial(n, p, size=None):
     """
 
     if not use_origin_backend(n):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -248,7 +248,7 @@ def chisquare(df, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -321,7 +321,7 @@ def exponential(scale=1.0, size=None):
     """
 
     if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -358,7 +358,7 @@ def f(dfnum, dfden, size=None):
     """
 
     if not use_origin_backend(dfnum):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -401,7 +401,7 @@ def gamma(shape, scale=1.0, size=None):
     """
 
     if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -444,7 +444,7 @@ def geometric(p, size=None):
     """
 
     if not use_origin_backend(p):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -483,7 +483,7 @@ def gumbel(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -526,7 +526,7 @@ def hypergeometric(ngood, nbad, nsample, size=None):
     """
 
    if not use_origin_backend(ngood):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -579,7 +579,7 @@ def laplace(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -618,7 +618,7 @@ def logistic(loc=0.0, scale=1.0, size=None):
     """
 
     if not use_origin_backend(loc):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -664,7 +664,7 @@ def lognormal(mean=0.0, sigma=1.0, size=None):
     """
 
     if not use_origin_backend(mean):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -726,7 +726,7 @@ def multinomial(n, pvals, size=None):
         pvals_sum = sum(pvals)
         pvals_desc = dpnp.get_dpnp_descriptor(dpnp.array(pvals))
         d = len(pvals)
-        if dpnp.is_cuda_backend(pvals_desc.get_array()):
+        if dpnp.is_cuda_backend(pvals_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -780,7 +780,7 @@ def multivariate_normal(mean, cov, size=None, check_valid="warn", tol=1e-8):
         cov_ = dpnp.get_dpnp_descriptor(dpnp.array(cov, dtype=dpnp.float64))
         if dpnp.is_cuda_backend(mean_.get_array()) or dpnp.is_cuda_backend(
             cov_.get_array()
-        ):
+        ):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -839,7 +839,7 @@ def negative_binomial(n, p, size=None):
     """
 
     if not use_origin_backend(n):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -929,7 +929,7 @@ def noncentral_chisquare(df, nonc, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -988,7 +988,7 @@ def pareto(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1062,7 +1062,7 @@ def poisson(lam=1.0, size=None):
     """
 
     if not use_origin_backend(lam):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1102,7 +1102,7 @@ def power(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1524,7 +1524,7 @@ def rayleigh(scale=1.0, size=None):
     """
 
     if not use_origin_backend(scale):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1606,7 +1606,7 @@ def shuffle(x1):
     x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_strides=False)
     if x1_desc:
 
-        if dpnp.is_cuda_backend(x1_desc.get_array()):
+        if dpnp.is_cuda_backend(x1_desc.get_array()):  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1655,7 +1655,7 @@ def seed(seed=None, device=None, sycl_queue=None):
    )
 
    if not use_origin_backend(seed):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1700,7 +1700,7 @@ def standard_cauchy(size=None):
     """
 
     if not use_origin_backend(size):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1729,7 +1729,7 @@ def standard_exponential(size=None):
     """
 
     if not use_origin_backend(size):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1761,7 +1761,7 @@ def standard_gamma(shape, size=None):
     """
 
     if not use_origin_backend(shape):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1844,7 +1844,7 @@ def standard_t(df, size=None):
     """
 
     if not use_origin_backend(df):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1885,7 +1885,7 @@ def triangular(left, mode, right, size=None):
     """
 
     if not use_origin_backend(left):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -1998,7 +1998,7 @@ def vonmises(mu, kappa, size=None):
     """
 
     if not use_origin_backend(mu):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2039,7 +2039,7 @@ def wald(mean, scale, size=None):
     """
 
     if not use_origin_backend(mean):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2080,7 +2080,7 @@ def weibull(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )
@@ -2117,7 +2117,7 @@ def zipf(a, size=None):
     """
 
     if not use_origin_backend(a):
-        if dpnp.is_cuda_backend():
+        if dpnp.is_cuda_backend():  # pragma: no cover
             raise NotImplementedError(
                 "Running on CUDA is currently not supported"
             )