
Commit 52491c9

[MLIR][LLVM] Remove typed pointer remnants from integration tests (#71208)
This commit removes all LLVM dialect typed pointers from the integration tests. Typed pointers have been deprecated for a while now and are planned to be removed from the LLVM dialect soon (see the migration sketch below). Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
1 parent a682a9c commit 52491c9

21 files changed: +28 −29 lines
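The change applied across the files below is mechanical: every parameterized !llvm.ptr<T> becomes the opaque !llvm.ptr, and any pointee type an operation still needs (loads, GEPs, allocas) is now spelled on the operation itself. A minimal before/after sketch of that pattern, assuming a made-up @first_char function rather than anything taken from these tests:

// Before (typed pointers):
//   !Filename = !llvm.ptr<i8>
//   %c = llvm.load %p : !llvm.ptr<i8>
// After (opaque pointers): the pointee type moves onto the operation.
!Filename = !llvm.ptr

llvm.func @first_char(%p : !Filename) -> i8 {
  %c = llvm.load %p : !llvm.ptr -> i8
  llvm.return %c : i8
}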

mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
 // R_UN: %{compile} | env %{env} %{run} | FileCheck %s
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #BSR = #sparse_tensor.encoding<{
   map = (i, j) ->

mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 // Do the same run, but now with direct IR generation and VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #DenseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : dense)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@
 // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
 // R_UN: %{compile} | env %{env} %{run} | FileCheck %s
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (i, j) -> ( i : dense, j : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 // Do the same run, but now with direct IR generation and VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseTensor = #sparse_tensor.encoding<{
   // Note that any dimToLvl permutation should give the same results

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed),

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseTensor = #sparse_tensor.encoding<{
   map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #DCSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed),

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SortedCOO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 // TODO: The test currently only operates on the triangular part of the
 // symmetric matrix.
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 
 // UNSUPPORTED: target=aarch64{{.*}}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 // TODO: The test currently only operates on the triangular part of the
 // symmetric matrix.
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
 // RUNNOT: %{compile} enable-runtime-library=false gpu-data-transfer-strategy=zero-copy" | %{run}
 //
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 // R_UN: %{compile} enable-runtime-library=false" | %{run}
 //
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)

mlir/test/Integration/Dialect/SparseTensor/python/test_output.py

Lines changed: 2 additions & 2 deletions
@@ -19,11 +19,11 @@
 def boilerplate(attr: st.EncodingAttr):
     """Returns boilerplate main method."""
     return f"""
-func.func @main(%p : !llvm.ptr<i8>) -> () attributes {{ llvm.emit_c_interface }} {{
+func.func @main(%p : !llvm.ptr) -> () attributes {{ llvm.emit_c_interface }} {{
   %d = arith.constant sparse<[[0, 0], [1, 1], [0, 9], [9, 0], [4, 4]],
                              [1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<10x10xf64>
   %a = sparse_tensor.convert %d : tensor<10x10xf64> to tensor<10x10xf64, {attr}>
-  sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr<i8>
+  sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr
   return
 }}
 """

mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir

Lines changed: 4 additions & 4 deletions
@@ -37,10 +37,10 @@ func.func @vector_copy_i128(%src: memref<?x?xi128>, %dst: memref<?x?xi128>) {
 }
 
 func.func @test_load_store_zaq0() {
-  %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr<array<17 x i8>>
-  %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr<array<17 x i8>>
-  %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr<array<17 x i8>>
-  %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr<array<17 x i8>>
+  %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr
+  %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr
+  %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr
+  %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr
 
   %c0 = arith.constant 0 : index
   %min_elts_q = arith.constant 1 : index

mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir

Lines changed: 4 additions & 5 deletions
@@ -13,18 +13,17 @@ module {
   llvm.func @entry() -> i32 {
     %c0 = llvm.mlir.constant(0 : index) : i64
 
-    %1 = llvm.mlir.addressof @const16 : !llvm.ptr<array<16 x i32>>
+    %1 = llvm.mlir.addressof @const16 : !llvm.ptr
     %ptr = llvm.getelementptr %1[%c0, %c0]
-      : (!llvm.ptr<array<16 x i32>>, i64, i64) -> !llvm.ptr<i32>
-    %ptr2 = llvm.bitcast %ptr : !llvm.ptr<i32> to !llvm.ptr<vector<16xi32>>
+      : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<16 x i32>
 
     // operand_attrs of *m operands need to be piped through to LLVM for
     // verification to pass.
     %v = llvm.inline_asm
       asm_dialect = intel
      operand_attrs = [{ elementtype = vector<16xi32> }]
-      "vmovdqu32 $0, $1", "=x,*m" %ptr2
-      : (!llvm.ptr<vector<16xi32>>) -> vector<16xi32>
+      "vmovdqu32 $0, $1", "=x,*m" %ptr
+      : (!llvm.ptr) -> vector<16xi32>
 
     // CHECK: 0
     %v0 = vector.extract %v[0]: i32 from vector<16xi32>
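This hunk is more than a type rename: with a single opaque pointer type, the llvm.bitcast between pointer types no longer expresses anything and is dropped, while the element type that used to be derived from !llvm.ptr<array<16 x i32>> is now written explicitly after the GEP result type. A minimal sketch of the same shape outside the test, assuming a made-up @first_element function with a local alloca instead of the test's global:

llvm.func @first_element() -> !llvm.ptr {
  %c0 = llvm.mlir.constant(0 : i64) : i64
  %c1 = llvm.mlir.constant(1 : i64) : i64
  // One stack slot holding a 16 x i32 array; the alloca names the element
  // type explicitly instead of yielding !llvm.ptr<array<16 x i32>>.
  %base = llvm.alloca %c1 x !llvm.array<16 x i32> : (i64) -> !llvm.ptr
  // The source element type is stated on the GEP itself; no pointer-to-pointer
  // bitcast is needed because every pointer is just !llvm.ptr.
  %elt = llvm.getelementptr %base[%c0, %c0] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<16 x i32>
  llvm.return %elt : !llvm.ptr
}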

mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
 module @mymod {
   func.func private @printMemrefF32(memref<*xf32>)
   memref.global "private" @bufferLhsGlobal : !shmemlhs
-  llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+  llvm.func @printf(!llvm.ptr, ...) -> i32
   func.func @main() {
     %c8192 = arith.constant 8192 : index
     %c-1_i32 = arith.constant -1 : i32

mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ module @mymod {
   func.func private @printMemrefF32(memref<*xf32>)
   memref.global "private" @bufferLhsGlobal : !shmemlhs
   memref.global "private" @bufferRhsGlobal : !shmemrhs
-  llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+  llvm.func @printf(!llvm.ptr, ...) -> i32
   func.func @main() {
     %c32768 = arith.constant 32768 : index
     %c-1_i32 = arith.constant -1 : i32
