- // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+ // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s

- // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
+ // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s

#include "Inputs/cuda.h"

// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
// CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
- // CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i8*
+ // CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
__global__ void use_dispatch_ptr(int *out) {
  const int *dispatch_ptr = (const int *)__builtin_amdgcn_dispatch_ptr();
  *out = *dispatch_ptr;
@@ -24,6 +24,39 @@ void test_ds_fmax(float src) {
  volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}

+ // CHECK-LABEL: @_Z12test_ds_faddf(
+ // CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+ __global__ void test_ds_fadd(float src) {
+   __shared__ float shared;
+   volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
+ }
+
+ // CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
+ // CHECK: %shared = alloca float*, align 8, addrspace(5)
+ // CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
+ // CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
+ // CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
+ // CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
+ // CHECK: store float* %[[S0]], float** %shared.ascast, align 8
+ // CHECK: %shared1 = load float*, float** %shared.ascast, align 8
+ // CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
+ // CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
+ // CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
+ // CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
+ __global__ void test_ds_fmin(float src, float *shared) {
+   volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+ }
+
+ // CHECK: @_Z33test_ret_builtin_nondef_addrspace
+ // CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
+ // CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
+ // CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+ // CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
+ // CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
+ __device__ void test_ret_builtin_nondef_addrspace() {
+   void *x = __builtin_amdgcn_dispatch_ptr();
+ }
+

// CHECK-LABEL: @_Z6endpgmv(
// CHECK: call void @llvm.amdgcn.endpgm()
__global__ void endpgm() {
@@ -33,12 +66,12 @@ __global__ void endpgm() {
// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.

// CHECK-LABEL: @_Z14test_uicmp_i64
- // CHECK: store i64* %out, i64** %out.addr.ascast
+ // CHECK: store i64* %out1, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
// CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
// CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
// CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
- // CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %0, i64 %1, i32 35)
+ // CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
// CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
// CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
// CHECK-NEXT: ret void
@@ -58,3 +91,28 @@ __global__ void test_s_memtime(unsigned long long* out)
{
  *out = __builtin_amdgcn_s_memtime();
}
+
+ // Check a generic pointer can be passed as a shared pointer and a generic pointer.
+ __device__ void func(float *x);
+
+ // CHECK: @_Z17test_ds_fmin_funcfPf
+ // CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
+ // CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
+ // CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
+ // CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
+ // CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
+ // CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
+ // CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
+ // CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
+ // CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
+ // CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+ // CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
+ // CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
+ // CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
+ // CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
+ __global__ void test_ds_fmin_func(float src, float *__restrict shared) {
+   volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
+   func(shared);
+ }
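
For readers unfamiliar with the pattern the new checks pin down, here is a minimal stand-alone HIP sketch, not part of this patch; the file name, kernel name, and variable names are illustrative assumptions. It exercises the same thing as test_ds_fmin_func: calling __builtin_amdgcn_ds_fminf through a generic (flat) float* and relying on CodeGen to insert the cast to the LDS addrspace(3) pointer the intrinsic expects, rather than asserting.

// ds_fmin_demo.hip -- hypothetical example, not part of the patch.
#include <hip/hip_runtime.h>

__global__ void ds_fmin_demo(float src, float *out) {
  __shared__ float lds_val;     // lives in LDS (addrspace(3))
  float *generic = &lds_val;    // used as a generic (flat) pointer
  // Passing the generic pointer relies on the implicit addrspacecast
  // whose emission the checks above verify.
  volatile float v = __builtin_amdgcn_ds_fminf(generic, src, 0, 0, false);
  *out = v;
}

Feeding a file like this through a cc1 invocation similar to the RUN lines above should produce IR with an addrspacecast to float addrspace(3)* feeding the llvm.amdgcn.ds.fmin.f32 call, mirroring the CHECK lines in this test.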