Commit 4809882

[MLIR][Linalg] Remove/update failing obsolete OpDSL test for linalg.matmul.
The earlier PR (llvm#104783) was reverted due to two failing OpDSL tests for linalg.matmul. Since linalg.matmul is now defined using TableGen ODS instead of the Python-based OpDSL, those tests started failing and need to be removed or updated. This commit removes/updates the obsolete failing tests in:
"mlir/test/python/integration/dialects/linalg/opsrun.py"
"mlir/test/python/integration/dialects/transform.py"
1 parent 4bfe1e3 commit 4809882

2 files changed (+14, -127 lines)

mlir/test/python/integration/dialects/linalg/opsrun.py

Lines changed: 0 additions & 115 deletions
@@ -50,37 +50,6 @@ def log(*args):
 }
 """

-matmul_boiler = """
-func.func @main() -> f32 attributes {llvm.emit_c_interface} {
-  %v0 = arith.constant 0.0 : f32
-  %v1 = arith.constant -1 : i8
-  %v2 = arith.constant 2.0 : f32
-
-  %A = memref.alloc() : memref<4x16xi8>
-  %B = memref.alloc() : memref<16x8xf32>
-  %C0 = memref.alloc() : memref<4x8xf32>
-  %C1 = memref.alloc() : memref<4x8xf32>
-  linalg.fill ins(%v1 : i8) outs(%A : memref<4x16xi8>)
-  linalg.fill ins(%v2 : f32) outs(%B : memref<16x8xf32>)
-  linalg.fill ins(%v0 : f32) outs(%C0 : memref<4x8xf32>)
-  linalg.fill ins(%v0 : f32) outs(%C1 : memref<4x8xf32>)
-
-  call @matmul_signed_on_buffers(%A, %B, %C0) :
-    (memref<4x16xi8>, memref<16x8xf32>, memref<4x8xf32>) -> ()
-  call @matmul_unsigned_on_buffers(%A, %B, %C1) :
-    (memref<4x16xi8>, memref<16x8xf32>, memref<4x8xf32>) -> ()
-
-  %c0 = arith.constant 0 : index
-  %res0 = memref.load %C0[%c0, %c0] : memref<4x8xf32>
-  %res1 = memref.load %C1[%c0, %c0] : memref<4x8xf32>
-
-  %0 = arith.addf %res0, %res1 : f32
-
-  // TODO: FFI-based solution to allow testing and printing with python code.
-  return %0 : f32
-}
-"""
-
 fill_boiler = """
 func.func @main() -> i32 attributes {llvm.emit_c_interface} {
   %O0 = memref.alloc() : memref<i32>
@@ -296,90 +265,6 @@ def elemwise_log_mul_on_buffers(lhs, rhs, out):
 test_elemwise_generic()


-def test_matmul_builtin():
-    with Context() as ctx, Location.unknown():
-        module = Module.create()
-        f32 = F32Type.get()
-        i8 = IntegerType.get_signless(8)
-        with InsertionPoint(module.body):
-
-            @func.FuncOp.from_py_func(
-                MemRefType.get((4, 16), i8),
-                MemRefType.get((16, 8), f32),
-                MemRefType.get((4, 8), f32),
-            )
-            def matmul_signed_on_buffers(lhs, rhs, out):
-                linalg.matmul(lhs, rhs, outs=[out])
-
-            @func.FuncOp.from_py_func(
-                MemRefType.get((4, 16), i8),
-                MemRefType.get((16, 8), f32),
-                MemRefType.get((4, 8), f32),
-            )
-            def matmul_unsigned_on_buffers(lhs, rhs, out):
-                linalg.matmul(lhs, rhs, outs=[out], cast=TypeFn.cast_unsigned)
-
-        execution_engine = ExecutionEngine(transform(module, matmul_boiler))
-
-        # TODO: FFI-based solution to allow testing and printing with python code.
-        # Prepare arguments: one result f32.
-        # Arguments must be passed as pointers.
-        c_float_p = ctypes.c_float * 1
-        res = c_float_p(-1.0)
-        execution_engine.invoke("main", res)
-
-        log("RESULT: ", res[0])
-        # matmul_signed_on_buffers: -1 * 2.0 * 16 = -32
-        # matmul_unsigned_on_buffers: (2^8-1) * 2.0 * 16 = 8160
-        # CHECK: RESULT: 8128


-test_matmul_builtin()


-def test_matmul_generic():
-    with Context() as ctx, Location.unknown():
-        module = Module.create()
-        f32 = F32Type.get()
-        i8 = IntegerType.get_signless(8)
-        with InsertionPoint(module.body):
-
-            @func.FuncOp.from_py_func(
-                MemRefType.get((4, 16), i8),
-                MemRefType.get((16, 8), f32),
-                MemRefType.get((4, 8), f32),
-            )
-            def matmul_signed_on_buffers(lhs, rhs, out):
-                linalg.matmul(lhs, rhs, outs=[out], emit_generic=True)
-
-            @func.FuncOp.from_py_func(
-                MemRefType.get((4, 16), i8),
-                MemRefType.get((16, 8), f32),
-                MemRefType.get((4, 8), f32),
-            )
-            def matmul_unsigned_on_buffers(lhs, rhs, out):
-                linalg.matmul(
-                    lhs, rhs, outs=[out], cast=TypeFn.cast_unsigned, emit_generic=True
-                )
-
-        execution_engine = ExecutionEngine(transform(module, matmul_boiler))
-
-        # TODO: FFI-based solution to allow testing and printing with python code.
-        # Prepare arguments: one result f32.
-        # Arguments must be passed as pointers.
-        c_float_p = ctypes.c_float * 1
-        res = c_float_p(-1.0)
-        execution_engine.invoke("main", res)
-
-        log("RESULT: ", res[0])
-        # matmul_signed_on_buffers = -1 * 2.0 * 16 = -32
-        # matmul_unsigned_on_buffers = (2^8-1) * 2.0 * 16 = 8160
-        # CHECK: RESULT: 8128


-test_matmul_generic()


 def test_fill_builtin():
     with Context() as ctx, Location.unknown():
         module = Module.create()
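
The expected value in the deleted "CHECK: RESULT: 8128" line follows directly from the fill constants in the removed matmul_boiler. A minimal sanity-check sketch of that arithmetic in plain Python (the variable names here are illustrative, not part of the test):

# Each output element accumulates K = 16 products A[i, k] * B[k, j].
K = 16
signed_result = -1 * 2.0 * K            # i8 value -1 under cast_signed -> -32.0
unsigned_result = (2**8 - 1) * 2.0 * K  # bit pattern 0xFF read unsigned -> 8160.0
assert signed_result + unsigned_result == 8128.0  # matches "CHECK: RESULT: 8128"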

mlir/test/python/integration/dialects/transform.py

Lines changed: 14 additions & 12 deletions
@@ -99,26 +99,28 @@ def basic(target: any_op_t()):
 # CHECK-LABEL: TEST: test_apply_patterns
 @construct_and_print_in_module
 def test_apply_patterns(module_):
-    M, N, K = 3, 5, 3
+    b, M, N, K = 1, 3, 5, 3

-    # CHECK-LABEL: func.func @matmul(
-    # CHECK-SAME: %[[VAL_0:.*]]: tensor<3x5xf32>, %[[VAL_1:.*]]: tensor<5x3xf32>, %[[VAL_2:.*]]: tensor<3x3xf32>) -> tensor<3x3xf32> {
+    # CHECK-LABEL: func.func @batch_reduce_matmul(
+    # CHECK-SAME: %[[VAL_0:.*]]: tensor<1x3x5xf32>,
+    # CHECK-SAME: %[[VAL_1:.*]]: tensor<1x5x3xf32>,
+    # CHECK-SAME: %[[VAL_2:.*]]: tensor<3x3xf32>) -> tensor<3x3xf32> {
     # CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
     # CHECK: %[[VAL_4:.*]] = arith.addi %[[VAL_3]], %[[VAL_3]] : i32
-    # CHECK: %[[VAL_5:.*]] = linalg.matmul {cast = #linalg.type_fn<cast_signed>} ins(%[[VAL_0]], %[[VAL_1]] : tensor<3x5xf32>, tensor<5x3xf32>) outs(%[[VAL_2]] : tensor<3x3xf32>) -> tensor<3x3xf32>
+    # CHECK: %[[VAL_5:.*]] = linalg.batch_reduce_matmul ins(%[[VAL_0]], %[[VAL_1]] : tensor<1x3x5xf32>, tensor<1x5x3xf32>) outs(%[[VAL_2]] : tensor<3x3xf32>) -> tensor<3x3xf32>
     # CHECK: return %[[VAL_5]] : tensor<3x3xf32>
     # CHECK: }
     @func.func(
-        T.tensor(M, N, T.f32()), T.tensor(N, K, T.f32()), T.tensor(M, K, T.f32())
+        T.tensor(b, M, N, T.f32()), T.tensor(b, N, K, T.f32()), T.tensor(M, K, T.f32())
     )
-    def matmul(A, B, C):
+    def batch_reduce_matmul(A, B, C):
         i = arith.constant(T.i32(), 1)
         v = arith.addi(i, i)
-        return linalg.matmul(A, B, outs=[C])
+        return linalg.batch_reduce_matmul(A, B, outs=[C])

     # CHECK-LABEL: module attributes {transform.with_named_sequence} {
     # CHECK: transform.named_sequence @__transform_main(%[[VAL_0:.*]]: !transform.any_op) {
-    # CHECK: %[[VAL_1:.*]] = transform.structured.match ops{["linalg.matmul"]} in %[[VAL_0]] : (!transform.any_op) -> !transform.any_op
+    # CHECK: %[[VAL_1:.*]] = transform.structured.match ops{["linalg.batch_reduce_matmul"]} in %[[VAL_0]] : (!transform.any_op) -> !transform.any_op
     # CHECK: %[[VAL_2:.*]] = transform.get_parent_op %[[VAL_1]] {op_name = "func.func"} : (!transform.any_op) -> !pdl.operation
     # CHECK: transform.apply_patterns to %[[VAL_2]] {
     # CHECK: transform.apply_patterns.canonicalization
@@ -132,7 +134,7 @@ def matmul(A, B, C):
     def mod():
         @named_sequence("__transform_main", [any_op_t()], [])
         def basic(variant_op: any_op_t()):
-            matmul = structured_match(any_op_t(), variant_op, ops=["linalg.matmul"])
+            matmul = structured_match(any_op_t(), variant_op, ops=["linalg.batch_reduce_matmul"])
             top_func = get_parent_op(pdl.op_t(), matmul, op_name="func.func")

             @apply_patterns(top_func)
@@ -147,9 +149,9 @@ def pats():
     pm = PassManager.parse("builtin.module(transform-interpreter)")
     pm.run(module_.operation)

-    # CHECK-LABEL: func.func @matmul(
-    # CHECK-SAME: %[[VAL_0:.*]]: tensor<3x5xf32>, %[[VAL_1:.*]]: tensor<5x3xf32>, %[[VAL_2:.*]]: tensor<3x3xf32>) -> tensor<3x3xf32> {
-    # CHECK: %[[VAL_3:.*]] = linalg.matmul {cast = #linalg.type_fn<cast_signed>} ins(%[[VAL_0]], %[[VAL_1]] : tensor<3x5xf32>, tensor<5x3xf32>) outs(%[[VAL_2]] : tensor<3x3xf32>) -> tensor<3x3xf32>
+    # CHECK-LABEL: func.func @batch_reduce_matmul(
+    # CHECK-SAME: %[[VAL_0:.*]]: tensor<1x3x5xf32>, %[[VAL_1:.*]]: tensor<1x5x3xf32>, %[[VAL_2:.*]]: tensor<3x3xf32>) -> tensor<3x3xf32> {
+    # CHECK: %[[VAL_3:.*]] = linalg.batch_reduce_matmul ins(%[[VAL_0]], %[[VAL_1]] : tensor<1x3x5xf32>, tensor<1x5x3xf32>) outs(%[[VAL_2]] : tensor<3x3xf32>) -> tensor<3x3xf32>
     # CHECK: return %[[VAL_3]] : tensor<3x3xf32>
     # CHECK: }
     print(module_)
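
The updated shapes follow the reduction semantics of linalg.batch_reduce_matmul: both the batch and the contraction dimensions are summed away, so inputs of shape (b, M, N) and (b, N, K) accumulate into a 2-D (M, K) output. A minimal pure-Python sketch of that contraction, assuming the sizes from test_apply_patterns (names here are illustrative only, not part of the test):

b, M, N, K = 1, 3, 5, 3  # sizes used in test_apply_patterns
A = [[[1.0] * N for _ in range(M)] for _ in range(b)]  # tensor<1x3x5xf32>
B = [[[1.0] * K for _ in range(N)] for _ in range(b)]  # tensor<1x5x3xf32>
C = [[0.0] * K for _ in range(M)]                      # tensor<3x3xf32>
for p in range(b):              # batch dimension is reduced, not retained
    for i in range(M):
        for j in range(K):
            for n in range(N):  # contraction dimension
                C[i][j] += A[p][i][n] * B[p][n][j]
print(C)  # every entry is 5.0 for this all-ones example (N = 5 products each)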
