
[Clang] Implement resource directory headers for common GPU intrinsics #110179


Merged · 20 commits · Nov 11, 2024
14 changes: 14 additions & 0 deletions clang/lib/Headers/CMakeLists.txt
@@ -273,6 +273,12 @@ set(x86_files
cpuid.h
)

set(gpu_files
gpuintrin.h
nvptxintrin.h
amdgpuintrin.h
)

set(windows_only_files
intrin0.h
intrin.h
@@ -301,6 +307,7 @@ set(files
${systemz_files}
${ve_files}
${x86_files}
${gpu_files}
${webassembly_files}
${windows_only_files}
${utility_files}
@@ -523,6 +530,7 @@ add_header_target("systemz-resource-headers" "${systemz_files};${zos_wrapper_fil
add_header_target("ve-resource-headers" "${ve_files}")
add_header_target("webassembly-resource-headers" "${webassembly_files}")
add_header_target("x86-resource-headers" "${x86_files}")
add_header_target("gpu-resource-headers" "${gpu_files}")

# Other header groupings
add_header_target("hlsl-resource-headers" ${hlsl_files})
@@ -709,6 +717,12 @@ install(
EXCLUDE_FROM_ALL
COMPONENT x86-resource-headers)

install(
FILES ${gpu_files}
DESTINATION ${header_install_dir}
EXCLUDE_FROM_ALL
COMPONENT gpu-resource-headers)
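# Illustrative only, not part of the patch: with the component declared above,
# the GPU headers can be installed standalone from a configured build tree,
# e.g. (assuming a build directory named 'build'):
#   cmake --install build --component gpu-resource-headers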

if(NOT CLANG_ENABLE_HLSL)
set(EXCLUDE_HLSL EXCLUDE_FROM_ALL)
endif()
190 changes: 190 additions & 0 deletions clang/lib/Headers/amdgpuintrin.h
@@ -0,0 +1,190 @@
//===-- amdgpuintrin.h - AMDGPU intrinsic functions -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef __AMDGPUINTRIN_H
#define __AMDGPUINTRIN_H

#ifndef __AMDGPU__
#error "This file is intended for AMDGPU targets or offloading to AMDGPU"
#endif

#include <stdint.h>

#if !defined(__cplusplus)
_Pragma("push_macro(\"bool\")");
#define bool _Bool
#endif

_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {arch(amdgcn)})");

// Type aliases to the address spaces used by the AMDGPU backend.
#define __gpu_private __attribute__((opencl_private))
#define __gpu_constant __attribute__((opencl_constant))
#define __gpu_local __attribute__((opencl_local))
#define __gpu_global __attribute__((opencl_global))
#define __gpu_generic __attribute__((opencl_generic))

// Attribute to declare a function as a kernel.
#define __gpu_kernel __attribute__((amdgpu_kernel, visibility("protected")))
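// Illustrative only, not part of the original patch: a minimal sketch of how
// the kernel and address-space macros compose. The kernel '__example_copy'
// and its parameters are hypothetical, and __gpu_thread_id_x is defined later
// in this header.
//
// __gpu_kernel void __example_copy(uint32_t __gpu_global *__dst,
//                                  const uint32_t __gpu_global *__src) {
//   __dst[__gpu_thread_id_x()] = __src[__gpu_thread_id_x()];
// }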

// Returns the number of workgroups in the 'x' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
Member: I believe the new COV4/5 have this pre-computed and we should likely use it.

Contributor Author: I remember talking about this w/ @arsenm and he said that this should just be caught by AMDGPUInstCombine or something. Maybe I should do that at some point.

Contributor: Yes, we can and should optimize this

}

// Returns the number of workgroups in the 'y' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
return __builtin_amdgcn_grid_size_y() / __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workgroups in the 'z' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
return __builtin_amdgcn_grid_size_z() / __builtin_amdgcn_workgroup_size_z();
}

// Returns the 'x' dimension of the current AMD workgroup's id.
Member: Shouldn't these have three "///"?

Contributor Author: Seems to vary between headers, I'll defer to someone like @AaronBallman.

Collaborator: We're consistently inconsistent, so it doesn't much matter. (Personally, I think all of our compiler-provided system headers should be using /* */ comments because they're available in C language modes and it seems silly to me that we'd want the overhead of issuing -Wcomment diagnostics that get suppressed by the diagnostics engine because they're in a system header. But that's just me being pedantic. Well, and the compiler being pedantic too, I suppose.)

Contributor Author: Thanks for the input, is there anything else about this approach that you have concerns about? I remember in my original RFC someone suggested waiting for you to chime in.

_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
return __builtin_amdgcn_workgroup_id_x();
}

// Returns the 'y' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
return __builtin_amdgcn_workgroup_id_y();
}

// Returns the 'z' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
return __builtin_amdgcn_workgroup_id_z();
}

// Returns the number of workitems in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
return __builtin_amdgcn_workgroup_size_x();
}

// Returns the number of workitems in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
return __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workitems in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
return __builtin_amdgcn_workgroup_size_z();
}

// Returns the 'x' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
return __builtin_amdgcn_workitem_id_x();
}

// Returns the 'y' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
return __builtin_amdgcn_workitem_id_y();
}

// Returns the 'z' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
return __builtin_amdgcn_workitem_id_z();
}

// Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
// and compilation options.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
return __builtin_amdgcn_wavefrontsize();
}

// Returns the id of the thread inside of an AMD wavefront executing together.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
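// Note (added for clarity): mbcnt_lo/mbcnt_hi count the set bits of the mask
// below the current lane's position, so with a full ~0u mask this returns the
// lane's index within the wavefront.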
return __builtin_amdgcn_mbcnt_hi(~0u, __builtin_amdgcn_mbcnt_lo(~0u, 0u));
}

// Returns the bit-mask of active threads in the current wavefront.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
return __builtin_amdgcn_read_exec();
}

// Copies the value from the first active thread in the wavefront to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
return __builtin_amdgcn_readfirstlane(__x);
}

// Copies the value from the first active thread in the wavefront to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
uint32_t __hi = (uint32_t)(__x >> 32ull);
uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
return ((uint64_t)__builtin_amdgcn_readfirstlane(__hi) << 32ull) |
((uint64_t)__builtin_amdgcn_readfirstlane(__lo));
}

// Returns a bitmask of threads in the current lane for which \p x is true.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
bool __x) {
// ANDing with __lane_mask gives the NVPTX semantics when __lane_mask is a
// subset of the currently active threads.
return __lane_mask & __builtin_amdgcn_ballot_w64(__x);
}
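
// Illustrative only, not part of the original patch: counting the lanes for
// which a predicate holds, using __gpu_ballot with the current lane mask. The
// helper name '__example_count_true' is hypothetical.
static __inline__ uint32_t __example_count_true(bool __pred) {
  return __builtin_popcountll(__gpu_ballot(__gpu_lane_mask(), __pred));
}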

// Waits for all the threads in the block to converge and issues a fence.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
__builtin_amdgcn_s_barrier();
__builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
}

// Waits for all threads in the wavefront to converge; this is a no-op on AMDGPU.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
__builtin_amdgcn_wave_barrier();
}

// Shuffles the lanes inside the wavefront according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
return __builtin_amdgcn_ds_bpermute(__idx << 2, __x);
}
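
// Illustrative only, not part of the original patch: a butterfly reduction
// that sums __x across the wavefront. A sketch assuming all lanes are active;
// it relies on __gpu_num_lanes() being a power of two (32 or 64). The helper
// name '__example_wave_sum' is hypothetical.
static __inline__ uint32_t __example_wave_sum(uint32_t __x) {
  for (uint32_t __step = __gpu_num_lanes() / 2; __step > 0; __step /= 2)
    __x += __gpu_shuffle_idx_u32(__gpu_lane_mask(), __gpu_lane_id() ^ __step, __x);
  return __x;
}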

// Shuffles the lanes inside the wavefront according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
uint32_t __hi = (uint32_t)(__x >> 32ull);
uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
return ((uint64_t)__builtin_amdgcn_ds_bpermute(__idx << 2, __hi) << 32ull) |
((uint64_t)__builtin_amdgcn_ds_bpermute(__idx << 2, __lo));
}

// Returns true if the flat pointer points to CUDA 'shared' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
return __builtin_amdgcn_is_shared(
(void __attribute__((address_space(0))) *)((void __gpu_generic *)ptr));
}

// Returns true if the flat pointer points to CUDA 'local' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
return __builtin_amdgcn_is_private(
(void __attribute__((address_space(0))) *)((void __gpu_generic *)ptr));
}
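
// Illustrative only, not part of the original patch: selecting a fence scope
// based on where a pointer lives. The helper name '__example_release_fence'
// is hypothetical, and "agent" is assumed as the wider sync-scope name.
static __inline__ void __example_release_fence(void *__ptr) {
  if (__gpu_is_ptr_local(__ptr))
    __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  else
    __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent");
}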

// Terminates execution of the associated wavefront.
_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
__builtin_amdgcn_endpgm();
}

// Suspend the thread briefly to assist the scheduler during busy loops.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
__builtin_amdgcn_s_sleep(2);
}

_Pragma("omp end declare variant");
_Pragma("omp end declare target");

#if !defined(__cplusplus)
_Pragma("pop_macro(\"bool\")");
#endif

#endif // __AMDGPUINTRIN_H