Commit df13acf

[CudaSPIRV] Add support for optional spir-v attributes (#116589)
Allow the optional SPIR-V kernel attributes (reqd_work_group_size, work_group_size_hint, vec_type_hint, intel_reqd_sub_group_size) on CUDA __global__ functions when the device target is a SPIR-V triple, and emit the corresponding kernel metadata. Test plan: ninja check-all
Parent: 174899f · Commit: df13acf
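For context, a minimal usage sketch of what this commit enables. The kernel name is illustrative, and the cc1 invocation in the comment mirrors the RUN line of the new CodeGen test added below; this is not part of the commit itself.

// Sketch only: compile as a CUDA device TU for a SPIR-V triple, e.g.
//   clang -cc1 -fcuda-is-device -triple spirv64 -emit-llvm -x cuda kernel.cu -o -
#define __global__ __attribute__((global))

// With this commit, the OpenCL-style kernel attributes are accepted on a
// CUDA __global__ function when the device triple is SPIR-V, and CodeGen
// attaches the matching kernel metadata (e.g. !reqd_work_group_size).
__attribute__((reqd_work_group_size(128, 1, 1)))
__attribute__((work_group_size_hint(2, 2, 2)))
__global__ void kernel_with_spirv_attrs() {}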

4 files changed, +54 −2 lines changed

clang/lib/CodeGen/CodeGenFunction.cpp

Lines changed: 5 additions & 1 deletion
@@ -635,7 +635,9 @@ void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,

  CGM.GenKernelArgMetadata(Fn, FD, this);

-  if (!getLangOpts().OpenCL)
+  if (!(getLangOpts().OpenCL ||
+        (getLangOpts().CUDA &&
+         getContext().getTargetInfo().getTriple().isSPIRV())))
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
@@ -1022,6 +1024,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
  }

  if (FD && (getLangOpts().OpenCL ||
+             (getLangOpts().CUDA &&
+              getContext().getTargetInfo().getTriple().isSPIRV()) ||
             ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
              getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.

clang/lib/Sema/SemaDeclAttr.cpp

Lines changed: 3 additions & 1 deletion
@@ -7368,7 +7368,9 @@ void Sema::ProcessDeclAttributeList(
  // good to have a way to specify "these attributes must appear as a group",
  // for these. Additionally, it would be good to have a way to specify "these
  // attribute must never appear as a group" for attributes like cold and hot.
-  if (!D->hasAttr<OpenCLKernelAttr>()) {
+  if (!(D->hasAttr<OpenCLKernelAttr>() ||
+        (D->hasAttr<CUDAGlobalAttr>() &&
+         Context.getTargetInfo().getTriple().isSPIRV()))) {
    // These attributes cannot be applied to a non-kernel function.
    if (const auto *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
      // FIXME: This emits a different error message than
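To illustrate the Sema change: before this patch only a function carrying OpenCLKernelAttr escaped the non-kernel attribute checks above; now a CUDA __global__ function is also exempt, but only when the target triple is SPIR-V. A small sketch under that assumption (function names are illustrative, not from the commit):

#define __global__ __attribute__((global))

// Exempt from the "non-kernel function" checks with this patch:
// __global__ (CUDAGlobalAttr) plus a spirv32/spirv64 target triple.
__attribute__((reqd_work_group_size(128, 1, 1)))
__global__ void spirv_kernel() {}

// Still routed through the non-kernel path: no __global__, so the
// attribute is diagnosed as before.
// __attribute__((reqd_work_group_size(128, 1, 1))) void plain_function() {}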
New CUDA CodeGen test

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
+// RUN: %clang_cc1 -fcuda-is-device -triple spirv64 -o - -emit-llvm -x cuda %s | FileCheck %s
+// RUN: %clang_cc1 -fcuda-is-device -triple spirv32 -o - -emit-llvm -x cuda %s | FileCheck %s
+
+#define __global__ __attribute__((global))
+
+__attribute__((reqd_work_group_size(128, 1, 1)))
+__global__ void reqd_work_group_size_128_1_1() {}
+
+__attribute__((work_group_size_hint(2, 2, 2)))
+__global__ void work_group_size_hint_2_2_2() {}
+
+__attribute__((vec_type_hint(int)))
+__global__ void vec_type_hint_int() {}
+
+__attribute__((intel_reqd_sub_group_size(64)))
+__global__ void intel_reqd_sub_group_size_64() {}
+
+// CHECK: define spir_kernel void @_Z28reqd_work_group_size_128_1_1v() #[[ATTR:[0-9]+]] !reqd_work_group_size ![[WG_SIZE:[0-9]+]]
+// CHECK: define spir_kernel void @_Z26work_group_size_hint_2_2_2v() #[[ATTR]] !work_group_size_hint ![[WG_HINT:[0-9]+]]
+// CHECK: define spir_kernel void @_Z17vec_type_hint_intv() #[[ATTR]] !vec_type_hint ![[VEC_HINT:[0-9]+]]
+// CHECK: define spir_kernel void @_Z28intel_reqd_sub_group_size_64v() #[[ATTR]] !intel_reqd_sub_group_size ![[SUB_GRP:[0-9]+]]
+
+// CHECK: attributes #[[ATTR]] = { {{.*}} }
+
+// CHECK: ![[WG_SIZE]] = !{i32 128, i32 1, i32 1}
+// CHECK: ![[WG_HINT]] = !{i32 2, i32 2, i32 2}
+// CHECK: ![[VEC_HINT]] = !{i32 undef, i32 1}
+// CHECK: ![[SUB_GRP]] = !{i32 64}

clang/test/SemaCUDA/spirv-attrs.cu

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+// expected-no-diagnostics
+
+// RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
+// RUN:   -fcuda-is-device -verify -fsyntax-only %s
+
+#include "Inputs/cuda.h"
+
+__attribute__((reqd_work_group_size(128, 1, 1)))
+__global__ void reqd_work_group_size_128_1_1() {}
+
+__attribute__((work_group_size_hint(2, 2, 2)))
+__global__ void work_group_size_hint_2_2_2() {}
+
+__attribute__((vec_type_hint(int)))
+__global__ void vec_type_hint_int() {}
+
+__attribute__((intel_reqd_sub_group_size(64)))
+__global__ void intel_reqd_sub_group_size_64() {}
