Skip to content

Commit a350089

Browse files
committed
[HIP] Allow target addr space in target builtins
This patch allows target specific addr space in target builtins for HIP. It inserts implicit addr space cast for non-generic pointer to generic pointer in general, and inserts implicit addr space cast for generic to non-generic for target builtin arguments only. It is NFC for non-HIP languages. Differential Revision: https://reviews.llvm.org/D102405
1 parent 2708cca commit a350089

File tree

4 files changed

+126
-7
lines changed

4 files changed

+126
-7
lines changed

clang/include/clang/AST/Type.h

Lines changed: 6 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -495,7 +495,12 @@ class Qualifiers {
495495
(A == LangAS::Default &&
496496
(B == LangAS::sycl_private || B == LangAS::sycl_local ||
497497
B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
498-
B == LangAS::sycl_global_host));
498+
B == LangAS::sycl_global_host)) ||
499+
// In HIP device compilation, any cuda address space is allowed
500+
// to implicitly cast into the default address space.
501+
(A == LangAS::Default &&
502+
(B == LangAS::cuda_constant || B == LangAS::cuda_device ||
503+
B == LangAS::cuda_shared));
499504
}
500505

501506
/// Returns true if the address space in these qualifiers is equal to or

clang/lib/Basic/Targets/AMDGPU.h

Lines changed: 10 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -352,7 +352,16 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
352352
}
353353

354354
LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
355-
return LangAS::Default;
355+
switch (AS) {
356+
case 1:
357+
return LangAS::cuda_device;
358+
case 3:
359+
return LangAS::cuda_shared;
360+
case 4:
361+
return LangAS::cuda_constant;
362+
default:
363+
return getLangASFromTargetAS(AS);
364+
}
356365
}
357366

358367
llvm::Optional<LangAS> getConstantAddressSpace() const override {

clang/lib/Sema/SemaExpr.cpp

Lines changed: 47 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -6572,6 +6572,53 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
65726572
return ExprError();
65736573

65746574
checkDirectCallValidity(*this, Fn, FD, ArgExprs);
6575+
6576+
// If this expression is a call to a builtin function in HIP device
6577+
// compilation, allow a pointer-type argument to default address space to be
6578+
// passed as a pointer-type parameter to a non-default address space.
6579+
// If Arg is declared in the default address space and Param is declared
6580+
// in a non-default address space, perform an implicit address space cast to
6581+
// the parameter type.
6582+
if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD &&
6583+
FD->getBuiltinID()) {
6584+
for (unsigned Idx = 0; Idx < FD->param_size(); ++Idx) {
6585+
ParmVarDecl *Param = FD->getParamDecl(Idx);
6586+
if (!ArgExprs[Idx] || !Param || !Param->getType()->isPointerType() ||
6587+
!ArgExprs[Idx]->getType()->isPointerType())
6588+
continue;
6589+
6590+
auto ParamAS = Param->getType()->getPointeeType().getAddressSpace();
6591+
auto ArgTy = ArgExprs[Idx]->getType();
6592+
auto ArgPtTy = ArgTy->getPointeeType();
6593+
auto ArgAS = ArgPtTy.getAddressSpace();
6594+
6595+
// Only allow implicit casting from a non-default address space pointee
6596+
// type to a default address space pointee type
6597+
if (ArgAS != LangAS::Default || ParamAS == LangAS::Default)
6598+
continue;
6599+
6600+
// First, ensure that the Arg is an RValue.
6601+
if (ArgExprs[Idx]->isGLValue()) {
6602+
ArgExprs[Idx] = ImplicitCastExpr::Create(
6603+
Context, ArgExprs[Idx]->getType(), CK_NoOp, ArgExprs[Idx],
6604+
nullptr, VK_PRValue, FPOptionsOverride());
6605+
}
6606+
6607+
// Construct a new arg type with address space of Param
6608+
Qualifiers ArgPtQuals = ArgPtTy.getQualifiers();
6609+
ArgPtQuals.setAddressSpace(ParamAS);
6610+
auto NewArgPtTy =
6611+
Context.getQualifiedType(ArgPtTy.getUnqualifiedType(), ArgPtQuals);
6612+
auto NewArgTy =
6613+
Context.getQualifiedType(Context.getPointerType(NewArgPtTy),
6614+
ArgTy.getQualifiers());
6615+
6616+
// Finally perform an implicit address space cast
6617+
ArgExprs[Idx] = ImpCastExprToType(ArgExprs[Idx], NewArgTy,
6618+
CK_AddressSpaceConversion)
6619+
.get();
6620+
}
6621+
}
65756622
}
65766623

65776624
if (Context.isDependenceAllowed() &&

clang/test/CodeGenCUDA/builtins-amdgcn.cu

Lines changed: 63 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,16 +1,16 @@
1-
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
1+
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
22
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
33
// RUN: -o - | FileCheck %s
44

5-
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
5+
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
66
// RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
77
// RUN: -o - | FileCheck %s
88

99
#include "Inputs/cuda.h"
1010

1111
// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
1212
// CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
13-
// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i8*
13+
// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
1414
__global__ void use_dispatch_ptr(int* out) {
1515
const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
1616
*out = *dispatch_ptr;
@@ -24,6 +24,39 @@ void test_ds_fmax(float src) {
2424
volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
2525
}
2626

27+
// CHECK-LABEL: @_Z12test_ds_faddf(
28+
// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
29+
__global__ void test_ds_fadd(float src) {
30+
__shared__ float shared;
31+
volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
32+
}
33+
34+
// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
35+
// CHECK: %shared = alloca float*, align 8, addrspace(5)
36+
// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
37+
// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
38+
// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
39+
// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
40+
// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
41+
// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
42+
// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
43+
// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
44+
// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
45+
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
46+
__global__ void test_ds_fmin(float src, float *shared) {
47+
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
48+
}
49+
50+
// CHECK: @_Z33test_ret_builtin_nondef_addrspace
51+
// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
52+
// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
53+
// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
54+
// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
55+
// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
56+
__device__ void test_ret_builtin_nondef_addrspace() {
57+
void *x = __builtin_amdgcn_dispatch_ptr();
58+
}
59+
2760
// CHECK-LABEL: @_Z6endpgmv(
2861
// CHECK: call void @llvm.amdgcn.endpgm()
2962
__global__ void endpgm() {
@@ -33,12 +66,12 @@ __global__ void endpgm() {
3366
// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.
3467

3568
// CHECK-LABEL: @_Z14test_uicmp_i64
36-
// CHECK: store i64* %out, i64** %out.addr.ascast
69+
// CHECK: store i64* %out1, i64** %out.addr.ascast
3770
// CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
3871
// CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
3972
// CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
4073
// CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
41-
// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %0, i64 %1, i32 35)
74+
// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
4275
// CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
4376
// CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
4477
// CHECK-NEXT: ret void
@@ -58,3 +91,28 @@ __global__ void test_s_memtime(unsigned long long* out)
5891
{
5992
*out = __builtin_amdgcn_s_memtime();
6093
}
94+
95+
// Check a generic pointer can be passed as a shared pointer and a generic pointer.
96+
__device__ void func(float *x);
97+
98+
// CHECK: @_Z17test_ds_fmin_funcfPf
99+
// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
100+
// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
101+
// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
102+
// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
103+
// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
104+
// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
105+
// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
106+
// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
107+
// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
108+
// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
109+
// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
110+
// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
111+
// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
112+
// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
113+
// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
114+
// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
115+
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
116+
volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
117+
func(shared);
118+
}

0 commit comments

Comments (0)