@@ -39,7 +39,7 @@ int test_amdgcn_target_tid_threads_simd() {
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[ARR]], ptr [[ARR_ADDR_ASCAST]], align 8
- // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR_ASCAST]], align 8, !nonnull [[META9:![0-9]+]], !dereferenceable [[META10:![0-9]+]], !align [[META11:![0-9]+]]
+ // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR_ASCAST]], align 8, !nonnull [[META9:![0-9]+]], !align [[META10:![0-9]+]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z30test_amdgcn_target_tid_threadsv_l14_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
@@ -60,7 +60,7 @@ int test_amdgcn_target_tid_threads_simd() {
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[I_ASCAST]], align 4
- // CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
+ // CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK: worker.exit:
// CHECK-NEXT: ret void
// CHECK: for.end:
@@ -83,34 +83,34 @@ int test_amdgcn_target_tid_threads_simd() {
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[ARR]], ptr [[ARR_ADDR_ASCAST]], align 8
- // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR_ASCAST]], align 8, !nonnull [[META9]], !dereferenceable [[META10]], !align [[META11]]
+ // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR_ASCAST]], align 8, !nonnull [[META9]], !align [[META10]]
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z35test_amdgcn_target_tid_threads_simdv_l23_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
- // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]]
+ // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], 1000
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
- // CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14]]
+ // CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
- // CHECK-NEXT: store i32 [[ADD]], ptr [[I_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14]]
- // CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14]]
+ // CHECK-NEXT: store i32 [[ADD]], ptr [[I_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13]]
+ // CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
- // CHECK-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]]
+ // CHECK-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
- // CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14]]
+ // CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1
- // CHECK-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP14]]
- // CHECK-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]]
+ // CHECK-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV_ASCAST]], align 4, !llvm.access.group [[ACC_GRP13]]
+ // CHECK-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK: worker.exit:
// CHECK-NEXT: ret void
// CHECK: omp.inner.for.end: