Commit 4c710e4

[OpenMP] Use generic IR for the OpenMP DeviceRTL
Summary: We previously built the DeviceRTL separately for every single GPU architecture to deal with incompatibilities between them. This patch updates it to use the 'generic' IR that `libc` and other projects use. This may have side-effects and is probably worth more testing, but it passes the tests I expect to pass on my side.
1 parent: 3f458cd
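
Here "generic" IR means device bitcode compiled without a fixed GPU architecture. The `#if __CUDA_ARCH__` checks that previously forced a per-architecture build are replaced by `__nvvm_reflect` calls that the backend folds to constants once a concrete target is known (see the Reduction.cpp changes below), and the offload packager now records the image with `arch=generic` so a single library serves every architecture.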

File tree

4 files changed, +62 -101 lines


clang/lib/Driver/ToolChains/CommonArgs.cpp

Lines changed: 1 addition & 2 deletions
@@ -2800,8 +2800,7 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
           : options::OPT_libomptarget_nvptx_bc_path_EQ;

   StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgpu" : "nvptx";
-  std::string LibOmpTargetName =
-      ("libomptarget-" + ArchPrefix + "-" + BitcodeSuffix + ".bc").str();
+  std::string LibOmpTargetName = ("libomptarget-" + ArchPrefix + ".bc").str();

   // First check whether user specifies bc library
   if (const Arg *A = DriverArgs.getLastArg(LibomptargetBCPathOpt)) {
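
The driver-visible effect of this hunk is that one bitcode library per vendor is expected, independent of the GPU architecture. A minimal standalone sketch of the new name construction (a plain bool stands in for `Triple.isAMDGCN()`; `BitcodeSuffix`, e.g. "sm_70" or "gfx90a", no longer participates):

#include <string>

// Sketch of the new scheme: every architecture shares one library,
// libomptarget-nvptx.bc or libomptarget-amdgpu.bc.
std::string deviceRTLName(bool isAMDGCN) {
  std::string ArchPrefix = isAMDGCN ? "amdgpu" : "nvptx";
  return "libomptarget-" + ArchPrefix + ".bc"; // was ...-<BitcodeSuffix>.bc
}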

clang/lib/Driver/ToolChains/Cuda.cpp

Lines changed: 0 additions & 1 deletion
@@ -851,7 +851,6 @@ void CudaToolChain::addClangTargetOptions(
   HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);

   StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
-  assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
   assert((DeviceOffloadingKind == Action::OFK_OpenMP ||
           DeviceOffloadingKind == Action::OFK_Cuda) &&
          "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");

offload/DeviceRTL/CMakeLists.txt

Lines changed: 16 additions & 54 deletions
@@ -42,38 +42,6 @@ set(devicertl_base_directory ${CMAKE_CURRENT_SOURCE_DIR})
 set(include_directory ${devicertl_base_directory}/include)
 set(source_directory ${devicertl_base_directory}/src)

-set(all_amdgpu_architectures "gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906"
-                             "gfx908;gfx90a;gfx90c;gfx940;gfx941;gfx942;gfx950;gfx1010"
-                             "gfx1012;gfx1030;gfx1031;gfx1032;gfx1033;gfx1034;gfx1035"
-                             "gfx1036;gfx1100;gfx1101;gfx1102;gfx1103;gfx1150"
-                             "gfx1151;gfx1152;gfx1153")
-set(all_nvptx_architectures "sm_35;sm_37;sm_50;sm_52;sm_53;sm_60;sm_61;sm_62"
-                            "sm_70;sm_72;sm_75;sm_80;sm_86;sm_87;sm_89;sm_90")
-set(all_gpu_architectures
-    "${all_amdgpu_architectures};${all_nvptx_architectures}")
-
-set(LIBOMPTARGET_DEVICE_ARCHITECTURES "all" CACHE STRING
-    "List of device architectures to be used to compile the OpenMP DeviceRTL.")
-
-if(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "all")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_gpu_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "amdgpu")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_amdgpu_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "nvptx")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_nvptx_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "auto" OR
-       LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "native")
-  if(NOT LIBOMPTARGET_NVPTX_ARCH AND NOT LIBOMPTARGET_AMDGPU_ARCH)
-    message(FATAL_ERROR
-      "Could not find 'amdgpu-arch' and 'nvptx-arch' tools required for 'auto'")
-  elseif(NOT LIBOMPTARGET_FOUND_NVIDIA_GPU AND NOT LIBOMPTARGET_FOUND_AMDGPU_GPU)
-    message(FATAL_ERROR "No AMD or NVIDIA GPU found on the system when using 'auto'")
-  endif()
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES
-    "${LIBOMPTARGET_NVPTX_DETECTED_ARCH_LIST};${LIBOMPTARGET_AMDGPU_DETECTED_ARCH_LIST}")
-endif()
-list(REMOVE_DUPLICATES LIBOMPTARGET_DEVICE_ARCHITECTURES)
-
 set(include_files
   ${include_directory}/Allocator.h
   ${include_directory}/Configuration.h
@@ -141,20 +109,21 @@ set(bc_flags -c -foffload-lto -std=c++17 -fvisibility=hidden

 # first create an object target
 add_library(omptarget.devicertl.all_objs OBJECT IMPORTED)
-function(compileDeviceRTLLibrary target_cpu target_name target_triple)
+function(compileDeviceRTLLibrary target_name target_triple)
   set(target_bc_flags ${ARGN})

   set(bc_files "")
   foreach(src ${src_files})
     get_filename_component(infile ${src} ABSOLUTE)
     get_filename_component(outfile ${src} NAME)
-    set(outfile "${outfile}-${target_cpu}.bc")
+    set(outfile "${outfile}-${target_name}.bc")
     set(depfile "${outfile}.d")

     add_custom_command(OUTPUT ${outfile}
       COMMAND ${CLANG_TOOL}
       ${bc_flags}
-      --offload-arch=${target_cpu}
+      -fopenmp-targets=${target_triple}
+      -Xopenmp-target=${target_triple} -march=
       ${target_bc_flags}
       -MD -MF ${depfile}
       ${infile} -o ${outfile}
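
Note the deliberately empty `-march=`: together with `-fopenmp-targets=${target_triple}` it requests bitcode for the target triple without pinning a GPU architecture. As a hedged sketch (abbreviating the remaining `${bc_flags}`), the generated compile command for one source file takes roughly this shape:

  clang++ -c -foffload-lto -std=c++17 -fvisibility=hidden ... \
      -fopenmp-targets=nvptx64-nvidia-cuda \
      -Xopenmp-target=nvptx64-nvidia-cuda -march= \
      -MD -MF Reduction.cpp-nvptx.bc.d \
      Reduction.cpp -o Reduction.cpp-nvptx.bc
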
@@ -177,7 +146,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
     list(APPEND bc_files ${outfile})
   endforeach()

-  set(bclib_name "libomptarget-${target_name}-${target_cpu}.bc")
+  set(bclib_name "libomptarget-${target_name}.bc")

   # Link to a bitcode library.
   add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/linked_${bclib_name}
@@ -217,7 +186,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
     APPEND)
   endif()

-  set(bclib_target_name "omptarget-${target_name}-${target_cpu}-bc")
+  set(bclib_target_name "omptarget-${target_name}-bc")
   add_custom_target(${bclib_target_name} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${bclib_name})

   # Copy library to destination.
@@ -239,7 +208,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   # Package the bitcode in the bitcode and embed it in an ELF for the static library
   add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
     COMMAND ${PACKAGER_TOOL} -o ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
-      "--image=file=${CMAKE_CURRENT_BINARY_DIR}/${bclib_name},${target_feature},triple=${target_triple},arch=${target_cpu},kind=openmp"
+      "--image=file=${CMAKE_CURRENT_BINARY_DIR}/${bclib_name},${target_feature},triple=${target_triple},arch=generic,kind=openmp"
     DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${bclib_name}
     COMMENT "Packaging LLVM offloading binary ${bclib_name}.out"
   )
@@ -249,14 +218,14 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
     APPEND)
   endif()

-  set(output_name "${CMAKE_CURRENT_BINARY_DIR}/devicertl-${target_name}-${target_cpu}.o")
+  set(output_name "${CMAKE_CURRENT_BINARY_DIR}/devicertl-${target_name}.o")
   add_custom_command(OUTPUT ${output_name}
     COMMAND ${CLANG_TOOL} --std=c++17 -c -nostdlib
       -Xclang -fembed-offload-object=${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
       -o ${output_name}
       ${source_directory}/Stub.cpp
     DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name} ${source_directory}/Stub.cpp
-    COMMENT "Embedding LLVM offloading binary in devicertl-${target_name}-${target_cpu}.o"
+    COMMENT "Embedding LLVM offloading binary in devicertl-${target_name}.o"
     VERBATIM
   )
   if(TARGET clang)
@@ -269,11 +238,11 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   set_property(TARGET omptarget.devicertl.all_objs APPEND PROPERTY IMPORTED_OBJECTS ${output_name})

   if (CMAKE_EXPORT_COMPILE_COMMANDS)
-    set(ide_target_name omptarget-ide-${target_name}-${target_cpu})
+    set(ide_target_name omptarget-ide-${target_name})
     add_library(${ide_target_name} STATIC EXCLUDE_FROM_ALL ${src_files})
     target_compile_options(${ide_target_name} PRIVATE
-      -fopenmp --offload-arch=${target_cpu} -fopenmp-cuda-mode
-      -mllvm -openmp-opt-disable
+      -fopenmp-targets=${target_triple} -Xopenmp-target=${target_triple} -march=
+      -fopenmp -fopenmp-cuda-mode -mllvm -openmp-opt-disable
       -foffload-lto -fvisibility=hidden --offload-device-only
       -nocudalib -nogpulib -nogpuinc -nostdlibinc -Wno-unknown-cuda-version
     )
@@ -288,18 +257,11 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   endif()
 endfunction()

-# Generate a Bitcode library for all the gpu architectures the user requested.
-add_custom_target(omptarget.devicertl.nvptx)
 add_custom_target(omptarget.devicertl.amdgpu)
-foreach(gpu_arch ${LIBOMPTARGET_DEVICE_ARCHITECTURES})
-  if("${gpu_arch}" IN_LIST all_amdgpu_architectures)
-    compileDeviceRTLLibrary(${gpu_arch} amdgpu amdgcn-amd-amdhsa -Xclang -mcode-object-version=none)
-  elseif("${gpu_arch}" IN_LIST all_nvptx_architectures)
-    compileDeviceRTLLibrary(${gpu_arch} nvptx nvptx64-nvidia-cuda --cuda-feature=+ptx63)
-  else()
-    message(FATAL_ERROR "Unknown GPU architecture '${gpu_arch}'")
-  endif()
-endforeach()
+compileDeviceRTLLibrary(amdgpu amdgcn-amd-amdhsa -Xclang -mcode-object-version=none)
+
+add_custom_target(omptarget.devicertl.nvptx)
+compileDeviceRTLLibrary(nvptx nvptx64-nvidia-cuda --cuda-feature=+ptx63)

 # Archive all the object files generated above into a static library
 add_library(omptarget.devicertl STATIC)

offload/DeviceRTL/src/Reduction.cpp

Lines changed: 45 additions & 44 deletions
@@ -44,7 +44,6 @@ void gpu_irregular_warp_reduce(void *reduce_data, ShuffleReductFnTy shflFct,
   }
 }

-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
 static uint32_t gpu_irregular_simd_reduce(void *reduce_data,
                                           ShuffleReductFnTy shflFct) {
   uint32_t size, remote_id, physical_lane_id;
@@ -63,7 +62,6 @@ static uint32_t gpu_irregular_simd_reduce(void *reduce_data,
   } while (logical_lane_id % 2 == 0 && size > 1);
   return (logical_lane_id == 0);
 }
-#endif

 static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
                                             ShuffleReductFnTy shflFct,
@@ -74,49 +72,53 @@ static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
   uint32_t NumThreads = omp_get_num_threads();
   if (NumThreads == 1)
     return 1;
-  /*
-   * This reduce function handles reduction within a team. It handles
-   * parallel regions in both L1 and L2 parallelism levels. It also
-   * supports Generic, SPMD, and NoOMP modes.
-   *
-   * 1. Reduce within a warp.
-   * 2. Warp master copies value to warp 0 via shared memory.
-   * 3. Warp 0 reduces to a single value.
-   * 4. The reduced value is available in the thread that returns 1.
-   */
-
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
-  uint32_t WarpsNeeded =
-      (NumThreads + mapping::getWarpSize() - 1) / mapping::getWarpSize();
-  uint32_t WarpId = mapping::getWarpIdInBlock();
-
-  // Volta execution model:
-  // For the Generic execution mode a parallel region either has 1 thread and
-  // beyond that, always a multiple of 32. For the SPMD execution mode we may
-  // have any number of threads.
-  if ((NumThreads % mapping::getWarpSize() == 0) || (WarpId < WarpsNeeded - 1))
-    gpu_regular_warp_reduce(reduce_data, shflFct);
-  else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
-    gpu_irregular_warp_reduce(reduce_data, shflFct,
-                              /*LaneCount=*/NumThreads % mapping::getWarpSize(),
-                              /*LaneId=*/mapping::getThreadIdInBlock() %
-                                  mapping::getWarpSize());

-  // When we have more than [mapping::getWarpSize()] number of threads
-  // a block reduction is performed here.
   //
-  // Only L1 parallel region can enter this if condition.
-  if (NumThreads > mapping::getWarpSize()) {
-    // Gather all the reduced values from each warp
-    // to the first warp.
-    cpyFct(reduce_data, WarpsNeeded);
+  // This reduce function handles reduction within a team. It handles
+  // parallel regions in both L1 and L2 parallelism levels. It also
+  // supports Generic, SPMD, and NoOMP modes.
+  //
+  // 1. Reduce within a warp.
+  // 2. Warp master copies value to warp 0 via shared memory.
+  // 3. Warp 0 reduces to a single value.
+  // 4. The reduced value is available in the thread that returns 1.
+  //

-    if (WarpId == 0)
-      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
-                                BlockThreadId);
+#if __has_builtin(__nvvm_reflect)
+  if (__nvvm_reflect("__CUDA_ARCH") >= 700) {
+    uint32_t WarpsNeeded =
+        (NumThreads + mapping::getWarpSize() - 1) / mapping::getWarpSize();
+    uint32_t WarpId = mapping::getWarpIdInBlock();
+
+    // Volta execution model:
+    // For the Generic execution mode a parallel region either has 1 thread and
+    // beyond that, always a multiple of 32. For the SPMD execution mode we may
+    // have any number of threads.
+    if ((NumThreads % mapping::getWarpSize() == 0) ||
+        (WarpId < WarpsNeeded - 1))
+      gpu_regular_warp_reduce(reduce_data, shflFct);
+    else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
+      gpu_irregular_warp_reduce(
+          reduce_data, shflFct,
+          /*LaneCount=*/NumThreads % mapping::getWarpSize(),
+          /*LaneId=*/mapping::getThreadIdInBlock() % mapping::getWarpSize());
+
+    // When we have more than [mapping::getWarpSize()] number of threads
+    // a block reduction is performed here.
+    //
+    // Only L1 parallel region can enter this if condition.
+    if (NumThreads > mapping::getWarpSize()) {
+      // Gather all the reduced values from each warp
+      // to the first warp.
+      cpyFct(reduce_data, WarpsNeeded);
+
+      if (WarpId == 0)
+        gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
+                                  BlockThreadId);
+    }
+    return BlockThreadId == 0;
   }
-  return BlockThreadId == 0;
-#else
+#endif
   __kmpc_impl_lanemask_t Liveness = mapping::activemask();
   if (Liveness == lanes::All) // Full warp
     gpu_regular_warp_reduce(reduce_data, shflFct);
@@ -150,10 +152,9 @@ static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
     return BlockThreadId == 0;
   }

-  // Get the OMP thread Id. This is different from BlockThreadId in the case of
-  // an L2 parallel region.
+  // Get the OMP thread Id. This is different from BlockThreadId in the case
+  // of an L2 parallel region.
   return BlockThreadId == 0;
-#endif // __CUDA_ARCH__ >= 700
 }

 uint32_t roundToWarpsize(uint32_t s) {
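
The mechanical change above is worth spelling out: a compile-time `#if __CUDA_ARCH__ >= 700` split cannot exist in generic IR, so both code paths are now emitted and the selection moves into the IR as an `__nvvm_reflect` call. A hedged, self-contained C++ sketch of the pattern (`voltaPath`/`legacyPath` are hypothetical stand-ins for the two reduction strategies above):

// Hypothetical stand-ins for the Volta and pre-Volta reductions.
static int voltaPath() { return 1; }
static int legacyPath() { return 0; }

static int pickReductionPath() {
#if __has_builtin(__nvvm_reflect)
  // In generic IR this call survives into the bitcode library. When the
  // library is later linked and lowered for a concrete GPU, the NVVMReflect
  // pass substitutes that GPU's __CUDA_ARCH value (e.g. 600 for sm_60,
  // 800 for sm_80) and the untaken branch is dead-code eliminated,
  // recovering the old #if/#else behavior.
  if (__nvvm_reflect("__CUDA_ARCH") >= 700)
    return voltaPath();
#endif
  // Pre-Volta NVPTX, and targets without the builtin (e.g. AMDGPU),
  // fall through to the legacy path, matching the old #else branch.
  return legacyPath();
}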
