Skip to content

Commit 88981b5

Browse files
authored
Add black to pre-commit config (#1449)
* Add black to pre-commit config * Replace numpy.Inf with dpnp.Inf in linalg tests * Correct naming to OneMKL * Rename numpy in test to pass validation tests
1 parent b0cd5f8 commit 88981b5

File tree

145 files changed

+11486
-7991
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

145 files changed

+11486
-7991
lines changed

.pre-commit-config.yaml

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,12 @@ repos:
1919
- id: check-toml
2020
- id: debug-statements
2121
- id: destroyed-symlinks
22-
- id: double-quote-string-fixer
2322
- id: end-of-file-fixer
2423
- id: fix-byte-order-marker
2524
- id: mixed-line-ending
2625
- id: trailing-whitespace
26+
- repo: https://github.com/psf/black
27+
rev: 23.3.0
28+
hooks:
29+
- id: black
30+
args: ["--check", "--diff", "--color"]

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
12
[![Pre-commit](https://github.com/IntelPython/dpnp/actions/workflows/pre-commit.yml/badge.svg?branch=master&event=push)](https://github.com/IntelPython/dpnp/actions/workflows/pre-commit.yml)
23
[![Conda package](https://github.com/IntelPython/dpnp/actions/workflows/conda-package.yml/badge.svg?branch=master&event=push)](https://github.com/IntelPython/dpnp/actions/workflows/conda-package.yml)
34
[![Coverage Status](https://coveralls.io/repos/github/IntelPython/dpnp/badge.svg?branch=master)](https://coveralls.io/github/IntelPython/dpnp?branch=master)

benchmarks/benchmarks/bench_elementwise.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,12 +8,13 @@
88
# but looks like first execution has additional overheads
99
# (need to be investigated)
1010
class Elementwise(Benchmark):
11-
executors = {'dpnp': dpnp, 'numpy': numpy}
12-
params = [['dpnp', 'numpy'],
13-
[2**16, 2**20, 2**24],
14-
['float64', 'float32', 'int64', 'int32']
15-
]
16-
param_names = ['executor', 'size', 'dtype']
11+
executors = {"dpnp": dpnp, "numpy": numpy}
12+
params = [
13+
["dpnp", "numpy"],
14+
[2**16, 2**20, 2**24],
15+
["float64", "float32", "int64", "int32"],
16+
]
17+
param_names = ["executor", "size", "dtype"]
1718

1819
def setup(self, executor, size, dtype):
1920
self.np = self.executors[executor]

benchmarks/benchmarks/bench_linalg.py

Lines changed: 43 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,12 @@
55

66

77
class Eindot(Benchmark):
8-
params = [[dpnp, numpy],
9-
[16, 32, 64, 128, 256, 512, 1024],
10-
['float64', 'float32', 'int64', 'int32']]
11-
param_names = ['executor', 'size', 'dtype']
8+
params = [
9+
[dpnp, numpy],
10+
[16, 32, 64, 128, 256, 512, 1024],
11+
["float64", "float32", "int64", "int32"],
12+
]
13+
param_names = ["executor", "size", "dtype"]
1214

1315
def setup(self, np, size, dtype):
1416
dt = getattr(np, dtype)
@@ -45,13 +47,13 @@ def time_dot_trans_atc_a(self, np, *args):
4547
np.dot(self.atc, self.a)
4648

4749
def time_einsum_i_ij_j(self, np, *args):
48-
np.einsum('i,ij,j', self.d, self.b, self.c)
50+
np.einsum("i,ij,j", self.d, self.b, self.c)
4951

5052
def time_einsum_ij_jk_a_b(self, np, *args):
51-
np.einsum('ij,jk', self.a, self.b)
53+
np.einsum("ij,jk", self.a, self.b)
5254

5355
def time_einsum_ijk_jil_kl(self, np, *args):
54-
np.einsum('ijk,jil->kl', self.a3, self.b3)
56+
np.einsum("ijk,jil->kl", self.a3, self.b3)
5557

5658
def time_inner_trans_a_a(self, np, *args):
5759
np.inner(self.a, self.a)
@@ -82,20 +84,19 @@ def time_tensordot_a_b_axes_1_0_0_1(self, np, *args):
8284

8385

8486
class Linalg(Benchmark):
85-
params = [[dpnp, numpy],
86-
['svd', 'pinv', 'det', 'norm'],
87-
TYPES1]
88-
param_names = ['executor', 'op', 'type']
87+
params = [[dpnp, numpy], ["svd", "pinv", "det", "norm"], TYPES1]
88+
param_names = ["executor", "op", "type"]
8989

9090
def setup(self, np, op, typename):
91-
np.seterr(all='ignore')
91+
np.seterr(all="ignore")
9292

9393
self.func = getattr(np.linalg, op)
9494

95-
if op == 'cholesky':
95+
if op == "cholesky":
9696
# we need a positive definite
97-
self.a = np.dot(get_squares_()[typename],
98-
get_squares_()[typename].T)
97+
self.a = np.dot(
98+
get_squares_()[typename], get_squares_()[typename].T
99+
)
99100
else:
100101
self.a = get_squares_()[typename]
101102

@@ -111,37 +112,38 @@ def time_op(self, np, op, typename):
111112

112113
class Lstsq(Benchmark):
113114
params = [dpnp, numpy]
114-
param_names = ['executor']
115+
param_names = ["executor"]
115116

116117
def setup(self, np):
117-
self.a = get_squares_()['float64']
118+
self.a = get_squares_()["float64"]
118119
self.b = get_indexes_rand()[:100].astype(np.float64)
119120

120121
def time_numpy_linalg_lstsq_a__b_float64(self, np):
121122
np.linalg.lstsq(self.a, self.b, rcond=-1)
122123

124+
123125
# class Einsum(Benchmark):
124-
# param_names = ['dtype']
125-
# params = [[np.float64]]
126-
# def setup(self, dtype):
127-
# self.a = np.arange(2900, dtype=dtype)
128-
# self.b = np.arange(3000, dtype=dtype)
129-
# self.c = np.arange(24000, dtype=dtype).reshape(20, 30, 40)
130-
# self.c1 = np.arange(1200, dtype=dtype).reshape(30, 40)
131-
# self.d = np.arange(10000, dtype=dtype).reshape(10,100,10)
132-
133-
# #outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two
134-
# def time_einsum_outer(self, dtype):
135-
# np.einsum("i,j", self.a, self.b, optimize=True)
136-
137-
# # multiply(a, b):trigger sum_of_products_contig_two
138-
# def time_einsum_multiply(self, dtype):
139-
# np.einsum("..., ...", self.c1, self.c , optimize=True)
140-
141-
# # sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two
142-
# def time_einsum_sum_mul(self, dtype):
143-
# np.einsum(",i...->", 300, self.d, optimize=True)
144-
145-
# # sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two
146-
# def time_einsum_sum_mul2(self, dtype):
147-
# np.einsum("i...,->", self.d, 300, optimize=True)
126+
# param_names = ['dtype']
127+
# params = [[np.float64]]
128+
# def setup(self, dtype):
129+
# self.a = np.arange(2900, dtype=dtype)
130+
# self.b = np.arange(3000, dtype=dtype)
131+
# self.c = np.arange(24000, dtype=dtype).reshape(20, 30, 40)
132+
# self.c1 = np.arange(1200, dtype=dtype).reshape(30, 40)
133+
# self.d = np.arange(10000, dtype=dtype).reshape(10,100,10)
134+
135+
# #outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two
136+
# def time_einsum_outer(self, dtype):
137+
# np.einsum("i,j", self.a, self.b, optimize=True)
138+
139+
# # multiply(a, b):trigger sum_of_products_contig_two
140+
# def time_einsum_multiply(self, dtype):
141+
# np.einsum("..., ...", self.c1, self.c , optimize=True)
142+
143+
# # sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two
144+
# def time_einsum_sum_mul(self, dtype):
145+
# np.einsum(",i...->", 300, self.d, optimize=True)
146+
147+
# # sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two
148+
# def time_einsum_sum_mul2(self, dtype):
149+
# np.einsum("i...,->", self.d, 300, optimize=True)

benchmarks/benchmarks/bench_random.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,9 @@
55

66
# asv run --python=python --quick --bench Sample
77
class Sample(Benchmark):
8-
executors = {'dpnp': dpnp, 'numpy': numpy}
9-
params = [['dpnp', 'numpy'], [2**16, 2**20, 2**24]]
10-
param_names = ['executor', 'size']
8+
executors = {"dpnp": dpnp, "numpy": numpy}
9+
params = [["dpnp", "numpy"], [2**16, 2**20, 2**24]]
10+
param_names = ["executor", "size"]
1111

1212
def setup(self, executor, size):
1313
self.executor = self.executors[executor]

benchmarks/benchmarks/common.py

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,18 @@
1616

1717
# a set of interesting types to test
1818
TYPES1 = [
19-
'int16', 'float16',
20-
'int32', 'float32',
21-
'int64', 'float64', 'complex64',
22-
'longfloat', 'complex128',
19+
"int16",
20+
"float16",
21+
"int32",
22+
"float32",
23+
"int64",
24+
"float64",
25+
"complex64",
26+
"longfloat",
27+
"complex128",
2328
]
24-
if 'complex256' in numpy.typeDict:
25-
TYPES1.append('complex256')
29+
if "complex256" in numpy.typeDict:
30+
TYPES1.append("complex256")
2631

2732

2833
def memoize(func):
@@ -32,13 +37,15 @@ def wrapper():
3237
if not result:
3338
result.append(func())
3439
return result[0]
40+
3541
return wrapper
3642

3743

3844
# values which will be used to construct our sample data matrices
3945
# replicate 10 times to speed up initial imports of this helper
4046
# and generate some redundancy
4147

48+
4249
@memoize
4350
def get_values():
4451
rnd = numpy.random.RandomState(1)
@@ -49,14 +56,15 @@ def get_values():
4956
@memoize
5057
def get_squares():
5158
values = get_values()
52-
squares = {t: numpy.array(values,
53-
dtype=getattr(numpy, t)).reshape((nx, ny))
54-
for t in TYPES1}
59+
squares = {
60+
t: numpy.array(values, dtype=getattr(numpy, t)).reshape((nx, ny))
61+
for t in TYPES1
62+
}
5563

5664
# adjust complex ones to have non-degenerated imagery part -- use
5765
# original data transposed for that
5866
for t, v in squares.items():
59-
if t.startswith('complex'):
67+
if t.startswith("complex"):
6068
v += v.T * 1j
6169
return squares
6270

@@ -90,8 +98,8 @@ def get_indexes():
9098
def get_indexes_rand():
9199
rnd = random.Random(1)
92100

93-
indexes_rand = get_indexes().tolist() # copy
94-
rnd.shuffle(indexes_rand) # in-place shuffle
101+
indexes_rand = get_indexes().tolist() # copy
102+
rnd.shuffle(indexes_rand) # in-place shuffle
95103
indexes_rand = numpy.array(indexes_rand)
96104
return indexes_rand
97105

benchmarks/pytest_benchmark/test_random.py

Lines changed: 66 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
# cython: language_level=3
32
# -*- coding: utf-8 -*-
43
# *****************************************************************************
@@ -37,36 +36,82 @@
3736
NNUMBERS = 2**26
3837

3938

40-
@pytest.mark.parametrize('function', [dpnp.random.beta, np.random.beta],
41-
ids=['dpnp', 'numpy'])
39+
@pytest.mark.parametrize(
40+
"function", [dpnp.random.beta, np.random.beta], ids=["dpnp", "numpy"]
41+
)
4242
def test_beta(benchmark, function):
43-
result = benchmark.pedantic(target=function, args=(4.0, 5.0, NNUMBERS,),
44-
rounds=ROUNDS, iterations=ITERATIONS)
43+
result = benchmark.pedantic(
44+
target=function,
45+
args=(
46+
4.0,
47+
5.0,
48+
NNUMBERS,
49+
),
50+
rounds=ROUNDS,
51+
iterations=ITERATIONS,
52+
)
4553

4654

47-
@pytest.mark.parametrize('function', [dpnp.random.exponential, np.random.exponential],
48-
ids=['dpnp', 'numpy'])
55+
@pytest.mark.parametrize(
56+
"function",
57+
[dpnp.random.exponential, np.random.exponential],
58+
ids=["dpnp", "numpy"],
59+
)
4960
def test_exponential(benchmark, function):
50-
result = benchmark.pedantic(target=function, args=(4.0, NNUMBERS,),
51-
rounds=ROUNDS, iterations=ITERATIONS)
61+
result = benchmark.pedantic(
62+
target=function,
63+
args=(
64+
4.0,
65+
NNUMBERS,
66+
),
67+
rounds=ROUNDS,
68+
iterations=ITERATIONS,
69+
)
5270

5371

54-
@pytest.mark.parametrize('function', [dpnp.random.gamma, np.random.gamma],
55-
ids=['dpnp', 'numpy'])
72+
@pytest.mark.parametrize(
73+
"function", [dpnp.random.gamma, np.random.gamma], ids=["dpnp", "numpy"]
74+
)
5675
def test_gamma(benchmark, function):
57-
result = benchmark.pedantic(target=function, args=(2.0, 4.0, NNUMBERS,),
58-
rounds=ROUNDS, iterations=ITERATIONS)
76+
result = benchmark.pedantic(
77+
target=function,
78+
args=(
79+
2.0,
80+
4.0,
81+
NNUMBERS,
82+
),
83+
rounds=ROUNDS,
84+
iterations=ITERATIONS,
85+
)
5986

6087

61-
@pytest.mark.parametrize('function', [dpnp.random.normal, np.random.normal],
62-
ids=['dpnp', 'numpy'])
88+
@pytest.mark.parametrize(
89+
"function", [dpnp.random.normal, np.random.normal], ids=["dpnp", "numpy"]
90+
)
6391
def test_normal(benchmark, function):
64-
result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,),
65-
rounds=ROUNDS, iterations=ITERATIONS)
92+
result = benchmark.pedantic(
93+
target=function,
94+
args=(
95+
0.0,
96+
1.0,
97+
NNUMBERS,
98+
),
99+
rounds=ROUNDS,
100+
iterations=ITERATIONS,
101+
)
66102

67103

68-
@pytest.mark.parametrize('function', [dpnp.random.uniform, np.random.uniform],
69-
ids=['dpnp', 'numpy'])
104+
@pytest.mark.parametrize(
105+
"function", [dpnp.random.uniform, np.random.uniform], ids=["dpnp", "numpy"]
106+
)
70107
def test_uniform(benchmark, function):
71-
result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,),
72-
rounds=ROUNDS, iterations=ITERATIONS)
108+
result = benchmark.pedantic(
109+
target=function,
110+
args=(
111+
0.0,
112+
1.0,
113+
NNUMBERS,
114+
),
115+
rounds=ROUNDS,
116+
iterations=ITERATIONS,
117+
)

0 commit comments

Comments (0)