[OpenMP][MLIR] Use opaque pointers in OpenMP translation tests 1/2 #70057
@llvm/pr-subscribers-mlir-llvm
@llvm/pr-subscribers-flang-openmp

Author: Kiran Chandramohan (kiranchandramohan)

Changes

First half of the tests switching to opaque pointers. Rest of the tests are omptarget-*.mlir and a specific test for typed pointers.

Patch is 65.56 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/70057.diff

4 Files Affected:
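For reference, a minimal sketch of the conversion pattern applied throughout these tests (the function names below are hypothetical and not taken from the patch): typed pointers such as !llvm.ptr&lt;i32&gt; become plain !llvm.ptr, and the element type moves onto the individual memory operations, as the diff below shows for load, store, and getelementptr.

// Before: typed pointers carry the element type.
llvm.func @example_typed(%x: !llvm.ptr<i32>, %v: i32) {
  llvm.store %v, %x : !llvm.ptr<i32>
  %0 = llvm.load %x : !llvm.ptr<i32>
  llvm.return
}

// After: opaque pointers; store states the value type and load states the result type.
llvm.func @example_opaque(%x: !llvm.ptr, %v: i32) {
  llvm.store %v, %x : i32, !llvm.ptr
  %0 = llvm.load %x : !llvm.ptr -> i32
  llvm.return
}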
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir b/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
index 994b8c9f52b100b..3c6ca1ef0c6bf5b 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
@@ -2,8 +2,8 @@
// Checking translation when the update is carried out by using more than one op
// in the region.
-llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %expr: i32) {
- omp.atomic.update %x : !llvm.ptr<i32> {
+llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr, %expr: i32) {
+ omp.atomic.update %x : !llvm.ptr {
^bb0(%xval: i32):
%t1 = llvm.mul %xval, %expr : i32
%t2 = llvm.sdiv %t1, %expr : i32
@@ -17,10 +17,10 @@ llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %expr: i32
// Checking translation when the captured variable is not used in the inner
// update operation
-llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %expr: i32) {
+llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr, %expr: i32) {
// expected-error @+2 {{no atomic update operation with region argument as operand found inside atomic.update region}}
// expected-error @+1 {{LLVM Translation failed for operation: omp.atomic.update}}
- omp.atomic.update %x : !llvm.ptr<i32> {
+ omp.atomic.update %x : !llvm.ptr {
^bb0(%xval: i32):
%newval = llvm.mul %expr, %expr : i32
omp.yield(%newval : i32)
@@ -32,12 +32,12 @@ llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %expr: i32
// Checking translation when the update is carried out by using more than one
// operations in the atomic capture region.
-llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32) {
+llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr, %v: !llvm.ptr, %expr: i32) {
// expected-error @+1 {{LLVM Translation failed for operation: omp.atomic.capture}}
omp.atomic.capture memory_order(seq_cst) {
- omp.atomic.read %v = %x : !llvm.ptr<i32>, i32
+ omp.atomic.read %v = %x : !llvm.ptr, i32
// expected-error @+1 {{no atomic update operation with region argument as operand found inside atomic.update region}}
- omp.atomic.update %x : !llvm.ptr<i32> {
+ omp.atomic.update %x : !llvm.ptr {
^bb0(%xval: i32):
%newval = llvm.mul %expr, %expr : i32
omp.yield(%newval : i32)
@@ -50,10 +50,10 @@ llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %v: !llvm.
// Checking translation when the captured variable is not used in the inner
// update operation
-llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr<i32>, %v: !llvm.ptr<i32>, %expr: i32) {
+llvm.func @omp_atomic_update_multiple_step_update(%x: !llvm.ptr, %v: !llvm.ptr, %expr: i32) {
omp.atomic.capture memory_order(seq_cst) {
- omp.atomic.read %v = %x : !llvm.ptr<i32>, i32
- omp.atomic.update %x : !llvm.ptr<i32> {
+ omp.atomic.read %v = %x : !llvm.ptr, i32
+ omp.atomic.update %x : !llvm.ptr {
^bb0(%xval: i32):
%t1 = llvm.mul %xval, %expr : i32
%t2 = llvm.sdiv %t1, %expr : i32
@@ -72,20 +72,20 @@ llvm.func @omp_threadprivate() {
%2 = llvm.mlir.constant(2 : i32) : i32
%3 = llvm.mlir.constant(3 : i32) : i32
- %4 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
+ %4 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr
// expected-error @below {{Addressing symbol not found}}
// expected-error @below {{LLVM Translation failed for operation: omp.threadprivate}}
- %5 = omp.threadprivate %4 : !llvm.ptr<i32> -> !llvm.ptr<i32>
+ %5 = omp.threadprivate %4 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %5 : !llvm.ptr<i32>
+ llvm.store %1, %5 : i32, !llvm.ptr
omp.parallel {
- %6 = omp.threadprivate %4 : !llvm.ptr<i32> -> !llvm.ptr<i32>
- llvm.store %2, %6 : !llvm.ptr<i32>
+ %6 = omp.threadprivate %4 : !llvm.ptr -> !llvm.ptr
+ llvm.store %2, %6 : i32, !llvm.ptr
omp.terminator
}
- llvm.store %3, %5 : !llvm.ptr<i32>
+ llvm.store %3, %5 : i32, !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 2cd561cb021075f..116c05f3747c6ae 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -19,23 +19,23 @@ llvm.func @test_stand_alone_directives() {
}
// CHECK-LABEL: define void @test_flush_construct(ptr %{{[0-9]+}})
-llvm.func @test_flush_construct(%arg0: !llvm.ptr<i32>) {
+llvm.func @test_flush_construct(%arg0: !llvm.ptr) {
// CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush
// CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
- omp.flush (%arg0 : !llvm.ptr<i32>)
+ omp.flush (%arg0 : !llvm.ptr)
// CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
- omp.flush (%arg0, %arg0 : !llvm.ptr<i32>, !llvm.ptr<i32>)
+ omp.flush (%arg0, %arg0 : !llvm.ptr, !llvm.ptr)
%0 = llvm.mlir.constant(1 : i64) : i64
// CHECK: alloca {{.*}} align 4
- %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr<i32>
+ %1 = llvm.alloca %0 x i32 {in_type = i32, name = "a"} : (i64) -> !llvm.ptr
// CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush
// CHECK: load i32, ptr
- %2 = llvm.load %1 : !llvm.ptr<i32>
+ %2 = llvm.load %1 : !llvm.ptr -> i32
// CHECK-NEXT: ret void
llvm.return
@@ -294,7 +294,7 @@ llvm.func @test_omp_master() -> () {
// CHECK: @[[$loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$loc]] {{.*}}
// CHECK-LABEL: @wsloop_simple
-llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
+llvm.func @wsloop_simple(%arg0: !llvm.ptr) {
%0 = llvm.mlir.constant(42 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mlir.constant(1 : index) : i64
@@ -306,8 +306,8 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
// CHECK: call i32 @__kmpc_global_thread_num
// CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$loc_struct]],
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
- %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
omp.yield
// CHECK: call void @__kmpc_for_static_fini(ptr @[[$loc_struct]],
}) {operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
@@ -319,7 +319,7 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
// -----
// CHECK-LABEL: @wsloop_inclusive_1
-llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr<f32>) {
+llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr) {
%0 = llvm.mlir.constant(42 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mlir.constant(1 : index) : i64
@@ -327,8 +327,8 @@ llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr<f32>) {
"omp.wsloop"(%1, %0, %2) ({
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
- %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
llvm.return
@@ -337,7 +337,7 @@ llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr<f32>) {
// -----
// CHECK-LABEL: @wsloop_inclusive_2
-llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr<f32>) {
+llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr) {
%0 = llvm.mlir.constant(42 : index) : i64
%1 = llvm.mlir.constant(10 : index) : i64
%2 = llvm.mlir.constant(1 : index) : i64
@@ -345,8 +345,8 @@ llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr<f32>) {
"omp.wsloop"(%1, %0, %2) ({
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
- %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {inclusive, operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
llvm.return
@@ -443,8 +443,8 @@ llvm.func @body(i32)
llvm.func @test_omp_wsloop_dynamic_chunk_var(%lb : i32, %ub : i32, %step : i32) -> () {
%1 = llvm.mlir.constant(1 : i64) : i64
- %chunk_size_alloca = llvm.alloca %1 x i16 {bindc_name = "chunk_size", in_type = i16, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr<i16>
- %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr<i16>
+ %chunk_size_alloca = llvm.alloca %1 x i16 {bindc_name = "chunk_size", in_type = i16, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr
+ %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i16
omp.wsloop schedule(dynamic = %chunk_size_var : i16)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32
@@ -464,8 +464,8 @@ llvm.func @body(i32)
llvm.func @test_omp_wsloop_dynamic_chunk_var2(%lb : i32, %ub : i32, %step : i32) -> () {
%1 = llvm.mlir.constant(1 : i64) : i64
- %chunk_size_alloca = llvm.alloca %1 x i64 {bindc_name = "chunk_size", in_type = i64, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr<i64>
- %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr<i64>
+ %chunk_size_alloca = llvm.alloca %1 x i64 {bindc_name = "chunk_size", in_type = i64, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr
+ %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i64
omp.wsloop schedule(dynamic = %chunk_size_var : i64)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32
@@ -618,15 +618,15 @@ llvm.func @test_omp_wsloop_guided_simd(%lb : i64, %ub : i64, %step : i64) -> ()
// -----
// CHECK-LABEL: @simdloop_simple
-llvm.func @simdloop_simple(%lb : i64, %ub : i64, %step : i64, %arg0: !llvm.ptr<f32>) {
+llvm.func @simdloop_simple(%lb : i64, %ub : i64, %step : i64, %arg0: !llvm.ptr) {
"omp.simdloop" (%lb, %ub, %step) ({
^bb0(%iv: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right metadata is added.
// CHECK: llvm.access.group
- %4 = llvm.getelementptr %arg0[%iv] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%iv] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {operandSegmentSizes = array<i32: 1,1,1,0,0,0>} :
(i64, i64, i64) -> ()
@@ -639,7 +639,7 @@ llvm.func @simdloop_simple(%lb : i64, %ub : i64, %step : i64, %arg0: !llvm.ptr<f
// -----
// CHECK-LABEL: @simdloop_simple_multiple
-llvm.func @simdloop_simple_multiple(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
+llvm.func @simdloop_simple_multiple(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr, %arg1: !llvm.ptr) {
omp.simdloop for (%iv1, %iv2) : i64 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
// The form of the emitted IR is controlled by OpenMPIRBuilder and
@@ -661,10 +661,10 @@ llvm.func @simdloop_simple_multiple(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 :
// CHECK: br label %[[COLLAPSED_HEADER:.*]]
// CHECK: llvm.access.group
// CHECK-NEXT: llvm.access.group
- %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
- llvm.store %3, %5 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
+ llvm.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -675,17 +675,17 @@ llvm.func @simdloop_simple_multiple(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 :
// -----
// CHECK-LABEL: @simdloop_simple_multiple_simdlen
-llvm.func @simdloop_simple_multiple_simdlen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
+llvm.func @simdloop_simple_multiple_simdlen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr, %arg1: !llvm.ptr) {
omp.simdloop simdlen(2) for (%iv1, %iv2) : i64 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right metadata is added.
// CHECK: llvm.access.group
// CHECK-NEXT: llvm.access.group
- %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
- llvm.store %3, %5 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
+ llvm.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -697,13 +697,13 @@ llvm.func @simdloop_simple_multiple_simdlen(%lb1 : i64, %ub1 : i64, %step1 : i64
// -----
// CHECK-LABEL: @simdloop_simple_multiple_safelen
-llvm.func @simdloop_simple_multiple_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
+llvm.func @simdloop_simple_multiple_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr, %arg1: !llvm.ptr) {
omp.simdloop safelen(2) for (%iv1, %iv2) : i64 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
- %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
- llvm.store %3, %5 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
+ llvm.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -714,13 +714,13 @@ llvm.func @simdloop_simple_multiple_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64
// -----
// CHECK-LABEL: @simdloop_simple_multiple_simdlen_safelen
-llvm.func @simdloop_simple_multiple_simdlen_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>) {
+llvm.func @simdloop_simple_multiple_simdlen_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 : i64, %ub2 : i64, %step2 : i64, %arg0: !llvm.ptr, %arg1: !llvm.ptr) {
omp.simdloop simdlen(1) safelen(2) for (%iv1, %iv2) : i64 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) {
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
- %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
- llvm.store %3, %4 : !llvm.ptr<f32>
- llvm.store %3, %5 : !llvm.ptr<f32>
+ %4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ %5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+ llvm.store %3, %4 : f32, !llvm.ptr
+ llvm.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -731,22 +731,22 @@ llvm.func @simdloop_simple_multiple_simdlen_safelen(%lb1 : i64, %ub1 : i64, %ste
// -----
// CHECK-LABEL: @simdloop_if
-llvm.func @simdloop_if(%arg0: !llvm.ptr<i32> {fir.bindc_name = "n"}, %arg1: !llvm.ptr<i32> {fir.bindc_name = "threshold"}) {
+llvm.func @simdloop_if(%arg0: !llvm.ptr {fir.bindc_name = "n"}, %arg1: !llvm.ptr {fir.bindc_name = "threshold"}) {
%0 = llvm.mlir.constant(1 : i64) : i64
- %1 = llvm.alloca %0 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>} : (i64) -> !llvm.ptr<i32>
+ %1 = llvm.alloca %0 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>} : (i64) -> !llvm.ptr
%2 = llvm.mlir.constant(1 : i64) : i64
- %3 = llvm.alloca %2 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFtest_simdEi"} : (i64) -> !llvm.ptr<i32>
+ %3 = llvm.alloca %2 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFtest_simdEi"} : (i64) -> !llvm.ptr
%4 = llvm.mlir.constant(0 : i32) : i32
- %5 = llvm.load %arg0 : !llvm.ptr<i32>
+ %5 = llvm.load %arg0 : !llvm.ptr -> i32
%6 = llvm.mlir.constant(1 : i32) : i32
- %7 = llvm.load %arg0 : !llvm.ptr<i32>
- %8 = llvm.load %arg1 : !llvm.ptr<i32>
+ %7 = llvm.load %arg0 : !llvm.ptr -> i32
+ %8 = llvm.load %arg1 : !llvm.ptr -> i32
%9 = llvm.icmp "sge" %7, %8 : i32
omp.simdloop if(%9) for (%arg2) : i32 = (%4) to (%5) inclusive step (%6) {
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right metadata is added.
// CHECK: llvm.access.group
- llvm.store %arg2, %1 : !llvm.ptr<i32>
+ llvm.store %arg2, %1 : i32, !llvm.ptr
omp.yield
}
llvm.return
@@ -932,13 +932,13 @@ omp.critical.declare @mutex_speculative_uncontended hint(speculative, uncontende
omp.critical.declare @mutex_speculative_contended hint(speculative, contended) // 10
// CHECK-LABEL: @omp_critical
-llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
+llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: call void @__kmpc_critical({{.*}}critical_user_.var{{.*}})
// CHECK: br label %omp.critical.region
// CHECK: omp.critical.region
omp.critical {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_.var{{.*}})
@@ -948,7 +948,7 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_none) {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_none.var{{.*}})
@@ -958,7 +958,7 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_uncontended) {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_uncontended.var{{.*}})
@@ -968,7 +968,7 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_contended) {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_contended.var{{.*}})
@@ -978,7 +978,7 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_nonspeculative) {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_nonspeculative.var{{.*}})
@@ -988,7 +988,7 @@ llvm.func @omp_critical(%x : !llvm.ptr<i32>, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_nonspeculative_uncontended) {
// CHECK: store
- llvm.store %xval, %x : !llvm.ptr<i32>
+ llvm.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_nonspeculative_unco...
[truncated]
Thanks for working on this!
LGTM! Thanks for the cleanup 😄
First half of the tests switching to opaque pointers. Rest of the tests are omptarget-*.mlir and a specific test for typed pointers.
Patch created as requested in #69772