Skip to content

Commit aa3e348

Browse files
committed
fix the spelling mistake
1 parent 623e90f commit aa3e348

File tree

1 file changed

+8
-8
lines changed
  • mlir/test/Integration/GPU/CUDA/sm90/python

1 file changed

+8
-8
lines changed

mlir/test/Integration/GPU/CUDA/sm90/python/matmul.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -95,12 +95,12 @@ def generate_matmul(
9595
BLOCK_M=128,
9696
BLOCK_N=128,
9797
BLOCK_K=64,
98-
use_warp_specilization=True,
98+
use_warp_specialization=True,
9999
saveIR=False,
100100
max_num_stages=3,
101101
):
102102
with matmulBuilder.ir.Context() as ctx, matmulBuilder.ir.Location.unknown():
103-
if use_warp_specilization:
103+
if use_warp_specialization:
104104
mlir_nvgpu_module = matmulBuilder.generate_matmul_ws(
105105
input_type,
106106
output_type,
@@ -161,7 +161,7 @@ def matmul(
161161
BLOCK_M=128,
162162
BLOCK_N=128,
163163
BLOCK_K=64,
164-
use_warp_specilization=True,
164+
use_warp_specialization=True,
165165
saveIR=False,
166166
max_num_stages=3,
167167
print_results=False,
@@ -170,7 +170,7 @@ def matmul(
170170
# Print the configuration
171171
ity = "f16" if input_type == np.float16 else "f32"
172172
oty = "f16" if output_type == np.float16 else "f32"
173-
gemmty = "Warp Specilization" if use_warp_specilization else "Multistage"
173+
gemmty = "Warp specialization" if use_warp_specialization else "Multistage"
174174
print(
175175
"===-- Running GEMM "
176176
+ gemmty
@@ -207,7 +207,7 @@ def matmul(
207207
BLOCK_M,
208208
BLOCK_N,
209209
BLOCK_K,
210-
use_warp_specilization,
210+
use_warp_specialization,
211211
saveIR,
212212
max_num_stages,
213213
)
@@ -221,7 +221,7 @@ def matmul(
221221
mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))
222222
kernelName = (
223223
"mlir_matmul_warpspecialized"
224-
if use_warp_specilization
224+
if use_warp_specialization
225225
else "mlir_matmul_multistage"
226226
)
227227

@@ -252,7 +252,7 @@ def matmul(
252252
128,
253253
4096,
254254
max_num_stages=3,
255-
use_warp_specilization=False,
255+
use_warp_specialization=False,
256256
)
257257
# GEMM Warp Specialized f32 += f16 * f16
258258
matmul(
@@ -262,5 +262,5 @@ def matmul(
262262
1024,
263263
512,
264264
max_num_stages=3,
265-
use_warp_specilization=True,
265+
use_warp_specialization=True,
266266
)

0 commit comments

Comments
 (0)