// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s

// Test that the block argument to the initialization region of
// omp.declare_reduction gets mapped properly when translating to LLVMIR.

module {
  omp.declare_reduction @add_reduction_byref_box_Uxf64 : !llvm.ptr init {
  ^bb0(%arg0: !llvm.ptr):
    // Test a use of %arg0, the init region's block argument:
    %11 = llvm.load %arg0 : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
    omp.yield(%arg0 : !llvm.ptr)
  } combiner {
  ^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
    omp.yield(%arg0 : !llvm.ptr)
  }
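  // The combiner simply yields its first argument; only the init region's use
  // of its block argument is of interest in this test.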

  llvm.func internal @_QFPreduce(%arg0: !llvm.ptr {fir.bindc_name = "r"}, %arg1: !llvm.ptr {fir.bindc_name = "r2"}) attributes {sym_visibility = "private"} {
    %8 = llvm.mlir.constant(1 : i32) : i32
    %9 = llvm.mlir.constant(10 : i32) : i32
    %10 = llvm.mlir.constant(0 : i32) : i32
    %83 = llvm.mlir.constant(1 : i64) : i64
    %84 = llvm.alloca %83 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> : (i64) -> !llvm.ptr
    %86 = llvm.mlir.constant(1 : i64) : i64
    %87 = llvm.alloca %86 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> : (i64) -> !llvm.ptr
    // Use multiple reduction variables to ensure they don't interfere with each
    // other when the reduction init region is inlined multiple times.
    omp.parallel byref reduction(@add_reduction_byref_box_Uxf64 %84 -> %arg3 : !llvm.ptr, @add_reduction_byref_box_Uxf64 %87 -> %arg4 : !llvm.ptr) {
      omp.terminator
    }
    llvm.return
  }
}
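
// The CHECK lines below verify that the init region is inlined once per
// reduction variable (each copy loads the descriptor through its own private
// pointer) and that both variables are passed, via a two-element pointer list,
// to __kmpc_reduce / __kmpc_end_reduce.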

// CHECK-LABEL: define internal void @_QFPreduce
// CHECK: %[[VAL_0:.*]] = alloca { ptr, ptr }, align 8
// CHECK: %[[VAL_1:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8
// CHECK: %[[VAL_2:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8
// CHECK: br label %[[VAL_3:.*]]
// CHECK: entry: ; preds = %[[VAL_4:.*]]
// CHECK: %[[VAL_5:.*]] = call i32 @__kmpc_global_thread_num(ptr @1)
// CHECK: br label %[[VAL_6:.*]]
// CHECK: omp_parallel: ; preds = %[[VAL_3]]
// CHECK: %[[VAL_7:.*]] = getelementptr { ptr, ptr }, ptr %[[VAL_0]], i32 0, i32 0
// CHECK: store ptr %[[VAL_1]], ptr %[[VAL_7]], align 8
// CHECK: %[[VAL_8:.*]] = getelementptr { ptr, ptr }, ptr %[[VAL_0]], i32 0, i32 1
// CHECK: store ptr %[[VAL_2]], ptr %[[VAL_8]], align 8
// CHECK: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @1, i32 1, ptr @_QFPreduce..omp_par, ptr %[[VAL_0]])
// CHECK: br label %[[VAL_9:.*]]
// CHECK: omp.par.outlined.exit: ; preds = %[[VAL_6]]
// CHECK: br label %[[VAL_10:.*]]
// CHECK: omp.par.exit.split: ; preds = %[[VAL_9]]
// CHECK: ret void
// CHECK: omp.par.entry:
// CHECK: %[[VAL_11:.*]] = getelementptr { ptr, ptr }, ptr %[[VAL_12:.*]], i32 0, i32 0
// CHECK: %[[VAL_13:.*]] = load ptr, ptr %[[VAL_11]], align 8
// CHECK: %[[VAL_14:.*]] = getelementptr { ptr, ptr }, ptr %[[VAL_12]], i32 0, i32 1
// CHECK: %[[VAL_15:.*]] = load ptr, ptr %[[VAL_14]], align 8
// CHECK: %[[VAL_16:.*]] = alloca i32, align 4
// CHECK: %[[VAL_17:.*]] = load i32, ptr %[[VAL_18:.*]], align 4
// CHECK: store i32 %[[VAL_17]], ptr %[[VAL_16]], align 4
// CHECK: %[[VAL_19:.*]] = load i32, ptr %[[VAL_16]], align 4
// CHECK: %[[VAL_20:.*]] = load { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[VAL_13]], align 8
// CHECK: %[[VAL_21:.*]] = alloca ptr, align 8
// CHECK: store ptr %[[VAL_13]], ptr %[[VAL_21]], align 8
// CHECK: %[[VAL_22:.*]] = load { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, ptr %[[VAL_15]], align 8
// CHECK: %[[VAL_23:.*]] = alloca ptr, align 8
// CHECK: store ptr %[[VAL_15]], ptr %[[VAL_23]], align 8
// CHECK: %[[VAL_24:.*]] = alloca [2 x ptr], align 8
// CHECK: br label %[[VAL_25:.*]]
// CHECK: omp.par.region: ; preds = %[[VAL_26:.*]]
// CHECK: br label %[[VAL_27:.*]]
// CHECK: omp.par.region1: ; preds = %[[VAL_25]]
// CHECK: br label %[[VAL_28:.*]]
// CHECK: omp.region.cont: ; preds = %[[VAL_27]]
// CHECK: %[[VAL_29:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_24]], i64 0, i64 0
// CHECK: store ptr %[[VAL_21]], ptr %[[VAL_29]], align 8
// CHECK: %[[VAL_30:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_24]], i64 0, i64 1
// CHECK: store ptr %[[VAL_23]], ptr %[[VAL_30]], align 8
// CHECK: %[[VAL_31:.*]] = call i32 @__kmpc_global_thread_num(ptr @1)
// CHECK: %[[VAL_32:.*]] = call i32 @__kmpc_reduce(ptr @1, i32 %[[VAL_31]], i32 2, i64 16, ptr %[[VAL_24]], ptr @.omp.reduction.func, ptr @.gomp_critical_user_.reduction.var)
// CHECK: switch i32 %[[VAL_32]], label %[[VAL_33:.*]] [
// CHECK: i32 1, label %[[VAL_34:.*]]
// CHECK: i32 2, label %[[VAL_35:.*]]
// CHECK: ]
// CHECK: reduce.switch.atomic: ; preds = %[[VAL_28]]
// CHECK: unreachable
// CHECK: reduce.switch.nonatomic: ; preds = %[[VAL_28]]
// CHECK: %[[VAL_36:.*]] = load ptr, ptr %[[VAL_21]], align 8
// CHECK: %[[VAL_37:.*]] = load ptr, ptr %[[VAL_23]], align 8
// CHECK: call void @__kmpc_end_reduce(ptr @1, i32 %[[VAL_31]], ptr @.gomp_critical_user_.reduction.var)
// CHECK: br label %[[VAL_33]]
// CHECK: reduce.finalize: ; preds = %[[VAL_34]], %[[VAL_28]]
// CHECK: br label %[[VAL_38:.*]]
// CHECK: omp.par.pre_finalize: ; preds = %[[VAL_33]]
// CHECK: br label %[[VAL_39:.*]]
// CHECK: omp.par.outlined.exit.exitStub: ; preds = %[[VAL_38]]
// CHECK: ret void
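// The trailing checks correspond to the generated reduction function
// (@.omp.reduction.func passed to __kmpc_reduce above), which loads the
// lhs/rhs pointers for both reduction variables from the pointer lists.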
// CHECK: %[[VAL_40:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_41:.*]], i64 0, i64 0
// CHECK: %[[VAL_42:.*]] = load ptr, ptr %[[VAL_40]], align 8
// CHECK: %[[VAL_43:.*]] = load ptr, ptr %[[VAL_42]], align 8
// CHECK: %[[VAL_44:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_45:.*]], i64 0, i64 0
// CHECK: %[[VAL_46:.*]] = load ptr, ptr %[[VAL_44]], align 8
// CHECK: %[[VAL_47:.*]] = load ptr, ptr %[[VAL_46]], align 8
// CHECK: %[[VAL_48:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_41]], i64 0, i64 1
// CHECK: %[[VAL_49:.*]] = load ptr, ptr %[[VAL_48]], align 8
// CHECK: %[[VAL_50:.*]] = load ptr, ptr %[[VAL_49]], align 8
// CHECK: %[[VAL_51:.*]] = getelementptr inbounds [2 x ptr], ptr %[[VAL_45]], i64 0, i64 1
// CHECK: %[[VAL_52:.*]] = load ptr, ptr %[[VAL_51]], align 8
// CHECK: %[[VAL_53:.*]] = load ptr, ptr %[[VAL_52]], align 8
// CHECK: ret void