[Clang] Implement resource directory headers for common GPU intrinsics #110179
amdgpuintrin.h
@@ -0,0 +1,190 @@
//===-- amdgpuintrin.h - AMDGPU intrinsic functions -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef __AMDGPUINTRIN_H
#define __AMDGPUINTRIN_H

#ifndef __AMDGPU__
#error "This file is intended for AMDGPU targets or offloading to AMDGPU"
#endif

#include <stdint.h>

#if !defined(__cplusplus)
_Pragma("push_macro(\"bool\")");
#define bool _Bool
#endif

_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {arch(amdgcn)})");

// Type aliases to the address spaces used by the AMDGPU backend.
#define __gpu_private __attribute__((opencl_private))
#define __gpu_constant __attribute__((opencl_constant))
#define __gpu_local __attribute__((opencl_local))
#define __gpu_global __attribute__((opencl_global))
#define __gpu_generic __attribute__((opencl_generic))

// Attribute to declare a function as a kernel.
#define __gpu_kernel __attribute__((amdgpu_kernel, visibility("protected")))

// Returns the number of workgroups in the 'x' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
  return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
}

// Returns the number of workgroups in the 'y' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
  return __builtin_amdgcn_grid_size_y() / __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workgroups in the 'z' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
  return __builtin_amdgcn_grid_size_z() / __builtin_amdgcn_workgroup_size_z();
}
// Returns the 'x' dimension of the current AMD workgroup's id.

[Inline review thread on this line]
Reviewer: Shouldn't these have three "///"?
Author: Seems to vary between headers, I'll defer to someone like @AaronBallman.
Reviewer: We're consistently inconsistent, so it doesn't much matter. (Personally, I think all of our compiler-provided system headers should be using …)
Author: Thanks for the input, is there anything else about this approach that you have concerns about? I remember in my original RFC someone suggested waiting for you to chime in.

_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
  return __builtin_amdgcn_workgroup_id_x();
}

// Returns the 'y' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
  return __builtin_amdgcn_workgroup_id_y();
}

// Returns the 'z' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
  return __builtin_amdgcn_workgroup_id_z();
}

// Returns the number of workitems in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
  return __builtin_amdgcn_workgroup_size_x();
}

// Returns the number of workitems in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
  return __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workitems in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
  return __builtin_amdgcn_workgroup_size_z();
}

// Returns the 'x' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
  return __builtin_amdgcn_workitem_id_x();
}

// Returns the 'y' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
  return __builtin_amdgcn_workitem_id_y();
}

// Returns the 'z' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
  return __builtin_amdgcn_workitem_id_z();
}
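Illustrative sketch (not part of the diff): the block and thread queries above compose into a flattened global id in the usual way. The helper name __example_global_thread_id_x is hypothetical and assumes a launch that only uses the 'x' dimension.

static __inline__ uint64_t __example_global_thread_id_x(void) {
  // Block offset times block size plus the thread's position in its block.
  return (uint64_t)__gpu_block_id_x() * __gpu_num_threads_x() +
         __gpu_thread_id_x();
}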
// Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
// and compilation options.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
  return __builtin_amdgcn_wavefrontsize();
}

// Returns the id of the thread inside of an AMD wavefront executing together.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
  return __builtin_amdgcn_mbcnt_hi(~0u, __builtin_amdgcn_mbcnt_lo(~0u, 0u));
}

// Returns the bit-mask of active threads in the current wavefront.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
  return __builtin_amdgcn_read_exec();
}
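Illustrative sketch (not part of the diff): __gpu_lane_id and __gpu_lane_mask are commonly combined to elect the lowest active lane as a leader. The helper name is hypothetical; __builtin_ctzll is a standard Clang builtin assumed to be available.

static __inline__ bool __example_is_leader_lane(void) {
  // The lowest set bit of the execution mask is the first active lane.
  uint64_t __mask = __gpu_lane_mask();
  return __gpu_lane_id() == (uint32_t)__builtin_ctzll(__mask);
}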
// Copies the value from the first active thread in the wavefront to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
  return __builtin_amdgcn_readfirstlane(__x);
}

// Copies the value from the first active thread in the wavefront to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
  uint32_t __hi = (uint32_t)(__x >> 32ull);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
  return ((uint64_t)__builtin_amdgcn_readfirstlane(__hi) << 32ull) |
         ((uint64_t)__builtin_amdgcn_readfirstlane(__lo));
}
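Illustrative sketch (not part of the diff): a typical use of the 64-bit broadcast is making a pointer produced by one lane uniform across the wavefront. The helper name is hypothetical; the cast assumes 64-bit flat pointers, as on AMDGPU.

static __inline__ void *__example_broadcast_ptr(void *__ptr) {
  // Every active lane receives the first active lane's pointer value.
  return (void *)__gpu_read_first_lane_u64(__gpu_lane_mask(), (uint64_t)__ptr);
}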
// Returns a bitmask of threads in the current wavefront for which \p x is true.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
                                                          bool __x) {
  // Masking with __lane_mask gives the NVPTX semantics when __lane_mask is a
  // subset of the active threads.
  return __lane_mask & __builtin_amdgcn_ballot_w64(__x);
}
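Illustrative sketch (not part of the diff): because the result is already masked by __lane_mask, counting the lanes that satisfy a predicate is a single population count. The helper name is hypothetical.

static __inline__ uint32_t __example_count_if(uint64_t __lane_mask, bool __pred) {
  return (uint32_t)__builtin_popcountll(__gpu_ballot(__lane_mask, __pred));
}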
// Waits for all the threads in the block to converge and issues a fence.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
  __builtin_amdgcn_s_barrier();
  __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
}

// Waits for all threads in the wavefront to converge; this is a no-op on AMDGPU.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
  __builtin_amdgcn_wave_barrier();
}
// Shuffles the lanes inside the wavefront according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
  // ds_bpermute addresses lanes in bytes, hence the shift by two.
  return __builtin_amdgcn_ds_bpermute(__idx << 2, __x);
}
// Shuffles the lanes inside the wavefront according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
  uint32_t __hi = (uint32_t)(__x >> 32ull);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
  return ((uint64_t)__builtin_amdgcn_ds_bpermute(__idx << 2, __hi) << 32ull) |
         ((uint64_t)__builtin_amdgcn_ds_bpermute(__idx << 2, __lo));
}
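Illustrative sketch (not part of the diff): the index shuffle supports the usual butterfly patterns, for example a wavefront-wide sum, assuming every lane in the wavefront is active and the wavefront size is a power of two. The helper name is hypothetical.

static __inline__ uint32_t __example_reduce_add_u32(uint32_t __x) {
  uint64_t __mask = __gpu_lane_mask();
  // XOR-butterfly: after log2(num_lanes) rounds every lane holds the full sum.
  for (uint32_t __step = __gpu_num_lanes() / 2; __step > 0; __step /= 2)
    __x += __gpu_shuffle_idx_u32(__mask, __gpu_lane_id() ^ __step, __x);
  return __x;
}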
// Returns true if the flat pointer points to CUDA 'shared' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
  return __builtin_amdgcn_is_shared(
      (void __attribute__((address_space(0))) *)((void __gpu_generic *)ptr));
}

// Returns true if the flat pointer points to CUDA 'local' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
  return __builtin_amdgcn_is_private(
      (void __attribute__((address_space(0))) *)((void __gpu_generic *)ptr));
}
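Illustrative sketch (not part of the diff): one use of these queries is narrowing a fence's scope when a pointer is known to be in workgroup-local memory. The helper name is hypothetical; __builtin_amdgcn_fence is the same builtin used by __gpu_sync_threads above.

static __inline__ void __example_release_fence_for(void *__ptr) {
  // A workgroup-scope fence is sufficient for 'shared' memory; otherwise fall
  // back to agent scope.
  if (__gpu_is_ptr_local(__ptr))
    __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  else
    __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent");
}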
// Terminates execution of the associated wavefront.
_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
  __builtin_amdgcn_endpgm();
}

// Suspends the thread briefly to assist the scheduler during busy loops.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
  __builtin_amdgcn_s_sleep(2);
}
_Pragma("omp end declare variant"); | ||
_Pragma("omp end declare target"); | ||
|
||
#if !defined(__cplusplus) | ||
_Pragma("pop_macro(\"bool\")"); | ||
#endif | ||
|
||
#endif // __AMDGPUINTRIN_H |
[Review thread]
Reviewer: I believe the new COV4/5 have this pre-computed and we should likely use it.
Author: I remember talking about this w/ @arsenm and he said that this should just be caught by AMDGPUInstCombine or something. Maybe I should do that at some point.
Reviewer: Yes, we can and should optimize this.
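Illustrative sketch (not part of the diff): a minimal translation unit written directly against this header, assuming it is reachable as <amdgpuintrin.h> in the compiler's resource directory and is compiled for an AMDGPU target (e.g. --target=amdgcn-amd-amdhsa). The kernel name and the output buffer are hypothetical.

#include <amdgpuintrin.h>

// Each thread records its wavefront lane id at its flattened global position.
void __gpu_kernel __example_write_lane_ids(uint32_t *__out) {
  uint32_t __id =
      __gpu_block_id_x() * __gpu_num_threads_x() + __gpu_thread_id_x();
  __out[__id] = __gpu_lane_id();
  __gpu_sync_threads();
}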