Skip to content

Commit 2860450

Browse files
committed
[AMDGPU] Add backward compatibility layer for kernarg preloading
Add a prologue to the kernel entry to handle cases where code designed for kernarg preloading is executed on hardware equipped with incompatible firmware. If hardware has compatible firmware the 256 bytes at the start of the kernel entry will be skipped. This skipping is done automatically by hardware that supports the feature. A pass is added which is intended to be run at the very end of the pipeline to avoid any optimizations that would assume the prologue is a real predecessor block to the actual code start. In reality we have two possible entry points for the function. 1. The optimized path that supports kernarg preloading which begins at an offset of 256 bytes. 2. The backwards compatible entry point which starts at offset 0.
1 parent 4228a6e commit 2860450

15 files changed

+7277
-400
lines changed

llvm/docs/AMDGPUUsage.rst

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5914,10 +5914,7 @@ additional 256 bytes to the kernel_code_entry_byte_offset. This addition
59145914
facilitates the incorporation of a prologue to the kernel entry to handle cases
59155915
where code designed for kernarg preloading is executed on hardware equipped with
59165916
incompatible firmware. If hardware has compatible firmware the 256 bytes at the
5917-
start of the kernel entry will be skipped. Additionally, the compiler backend
5918-
may insert a trap instruction at the start of the kernel prologue to manage
5919-
situations where kernarg preloading is attempted on hardware with incompatible
5920-
firmware.
5917+
start of the kernel entry will be skipped.
59215918

59225919
With code object V5 and later, hidden kernel arguments that are normally
59235920
accessed through the Implicit Argument Ptr, may be preloaded into User SGPRs.

llvm/lib/Target/AMDGPU/AMDGPU.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM = nullptr);
6464
ModulePass *createAMDGPULowerBufferFatPointersPass();
6565
FunctionPass *createSIModeRegisterPass();
6666
FunctionPass *createGCNPreRAOptimizationsPass();
67+
FunctionPass *createAMDGPUPreloadKernargHeaderLegacyPass();
6768

6869
struct AMDGPUSimplifyLibCallsPass : PassInfoMixin<AMDGPUSimplifyLibCallsPass> {
6970
AMDGPUSimplifyLibCallsPass() {}
@@ -230,6 +231,9 @@ extern char &AMDGPUPerfHintAnalysisLegacyID;
230231
void initializeGCNRegPressurePrinterPass(PassRegistry &);
231232
extern char &GCNRegPressurePrinterID;
232233

234+
void initializeAMDGPUPreloadKernargHeaderLegacyPass(PassRegistry &);
235+
extern char &AMDGPUPreloadKernargHeaderLegacyID;
236+
233237
// Passes common to R600 and SI
234238
FunctionPass *createAMDGPUPromoteAlloca();
235239
void initializeAMDGPUPromoteAllocaPass(PassRegistry&);

llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H
1010
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H
1111

12+
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
1213
#include "llvm/ADT/DenseMap.h"
1314
#include "llvm/CodeGen/Register.h"
1415
#include "llvm/Pass.h"
@@ -161,6 +162,7 @@ struct AMDGPUFunctionArgInfo {
161162

162163
// Map the index of preloaded kernel arguments to its descriptor.
163164
SmallDenseMap<int, KernArgPreloadDescriptor> PreloadKernArgs{};
165+
Register FirstKernArgPreloadReg = AMDGPU::NoRegister;
164166

165167
std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
166168
getPreloadedValue(PreloadedValue Value) const;

llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -207,12 +207,6 @@ void AMDGPUAsmPrinter::emitFunctionBodyStart() {
207207

208208
if (STM.isAmdHsaOS())
209209
HSAMetadataStream->emitKernel(*MF, CurrentProgramInfo);
210-
211-
if (MFI.getNumKernargPreloadedSGPRs() > 0) {
212-
assert(AMDGPU::hasKernargPreload(STM));
213-
getTargetStreamer()->EmitKernargPreloadHeader(*getGlobalSTI(),
214-
STM.isAmdHsaOS());
215-
}
216210
}
217211

218212
void AMDGPUAsmPrinter::emitFunctionBodyEnd() {
Lines changed: 229 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,229 @@
1+
//===- AMDGPUPreloadKernargHeader.cpp - Preload Kernarg Header ------------===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
//
9+
/// \file This pass handles the creation of the backwards compatibility layer
10+
/// for kernarg preloading. Code may be compiled with the feature enabled, while
11+
/// the kernel is executed on hardware without firmware support.
12+
///
13+
/// To avoid the need for recompilation, we insert a block at the beginning of
14+
/// the kernel that is responsible for loading the kernel arguments into SGPRs
15+
/// using s_load instructions which set up the registers exactly as they would be
16+
/// by firmware if the code were executed on a system that supported kernarg
17+
/// preloading.
18+
///
19+
/// This essentially allows for two entry points for the kernel. Firmware that
20+
/// supports the feature will automatically jump past the first 256 bytes of the
21+
/// program, skipping the backwards compatibility layer and directly beginning
22+
/// execution on the fast code path.
23+
///
24+
/// This pass should be run as late as possible, to avoid any optimization that
25+
/// may assume that padding is dead code or that the prologue added here is a
26+
/// true predecessor of the kernel entry block.
27+
//===----------------------------------------------------------------------===//
28+
29+
#include "AMDGPUPreloadKernargHeader.h"
30+
#include "AMDGPU.h"
31+
#include "GCNSubtarget.h"
32+
#include "SIMachineFunctionInfo.h"
33+
#include "llvm/CodeGen/MachineFunctionPass.h"
34+
#include "llvm/TargetParser/TargetParser.h"
35+
36+
using namespace llvm;
37+
38+
#define DEBUG_TYPE "amdgpu-preload-kernarg-header"
39+
40+
namespace {
41+
42+
struct LoadConfig {
43+
unsigned Size;
44+
const TargetRegisterClass *RegClass;
45+
unsigned Opcode;
46+
Register LoadReg;
47+
48+
// Constructor for the static config array
49+
LoadConfig(unsigned S, const TargetRegisterClass *RC, unsigned Op)
50+
: Size(S), RegClass(RC), Opcode(Op), LoadReg(AMDGPU::NoRegister) {}
51+
52+
// Constructor for the return value
53+
LoadConfig(unsigned S, const TargetRegisterClass *RC, unsigned Op,
54+
Register Reg)
55+
: Size(S), RegClass(RC), Opcode(Op), LoadReg(Reg) {}
56+
};
57+
58+
// Worker that builds the backward-compatibility prologue for a single
// machine function. Instantiated per function by both the legacy and the
// new-pass-manager wrappers below.
class AMDGPUPreloadKernargHeader {
public:
  AMDGPUPreloadKernargHeader(MachineFunction &MF);

  // Insert the back-compat block if this function preloads kernargs.
  // Returns true when the function was modified.
  bool run();

private:
  MachineFunction &MF;
  const GCNSubtarget &ST;
  const SIMachineFunctionInfo &MFI;
  const SIInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Create a new block before the entry point to the kernel. Firmware that
  // supports preloading kernel arguments will automatically jump past this
  // block to the alternative kernel entry point.
  void createBackCompatBlock();

  // Add instructions to load kernel arguments into SGPRs, returns the number of
  // s_load instructions added.
  unsigned addBackCompatLoads(MachineBasicBlock *BackCompatMBB,
                              Register KernargSegmentPtr,
                              unsigned NumKernargPreloadSGPRs);
};
82+
83+
// Legacy pass-manager wrapper. Intended to run at the very end of the codegen
// pipeline (after branch relaxation) so no optimization treats the prologue
// as a true predecessor of the kernel entry block.
class AMDGPUPreloadKernargHeaderLegacy : public MachineFunctionPass {
public:
  static char ID;

  AMDGPUPreloadKernargHeaderLegacy() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "AMDGPU Preload Kernarg Header";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};
95+
96+
} // end anonymous namespace
97+
98+
char AMDGPUPreloadKernargHeaderLegacy::ID = 0;

INITIALIZE_PASS(AMDGPUPreloadKernargHeaderLegacy, DEBUG_TYPE,
                "AMDGPU Preload Kernarg Header", false, false)

// Exported ID so the target machine can schedule this pass by reference.
char &llvm::AMDGPUPreloadKernargHeaderLegacyID =
    AMDGPUPreloadKernargHeaderLegacy::ID;

// Factory used by the AMDGPU target machine when populating the pipeline.
FunctionPass *llvm::createAMDGPUPreloadKernargHeaderLegacyPass() {
  return new AMDGPUPreloadKernargHeaderLegacy();
}
109+
110+
// Delegate to the per-function worker; returns whether MF was modified.
bool AMDGPUPreloadKernargHeaderLegacy::runOnMachineFunction(
    MachineFunction &MF) {
  AMDGPUPreloadKernargHeader Impl(MF);
  return Impl.run();
}
114+
115+
// Cache the subtarget, machine-function info, instruction info and register
// info once so the helpers do not have to re-query them.
AMDGPUPreloadKernargHeader::AMDGPUPreloadKernargHeader(MachineFunction &MF)
    : MF(MF), ST(MF.getSubtarget<GCNSubtarget>()),
      MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()) {}
119+
120+
bool AMDGPUPreloadKernargHeader::run() {
121+
if (!ST.hasKernargPreload())
122+
return false;
123+
124+
unsigned NumPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
125+
if (NumPreloadSGPRs <= 0)
126+
return false;
127+
128+
if (MF.begin() == MF.end())
129+
return false;
130+
131+
createBackCompatBlock();
132+
133+
return true;
134+
}
135+
136+
void AMDGPUPreloadKernargHeader::createBackCompatBlock() {
137+
auto KernelEntryMBB = MF.begin();
138+
MachineBasicBlock *BackCompatMBB = MF.CreateMachineBasicBlock();
139+
MF.insert(KernelEntryMBB, BackCompatMBB);
140+
BackCompatMBB->addSuccessor(&*KernelEntryMBB);
141+
142+
assert(MFI.getUserSGPRInfo().hasKernargSegmentPtr());
143+
Register KernargSegmentPtr = MFI.getArgInfo().KernargSegmentPtr.getRegister();
144+
BackCompatMBB->addLiveIn(KernargSegmentPtr);
145+
146+
unsigned NumKernargPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
147+
unsigned NumInstrs = 0;
148+
149+
// Load kernel arguments to SGPRs
150+
NumInstrs += addBackCompatLoads(BackCompatMBB, KernargSegmentPtr,
151+
NumKernargPreloadSGPRs);
152+
153+
AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
154+
unsigned Waitcnt =
155+
AMDGPU::encodeWaitcnt(IV, getVmcntBitMask(IV), getExpcntBitMask(IV), 0);
156+
157+
// Wait for loads to complete
158+
BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_WAITCNT))
159+
.addImm(Waitcnt);
160+
NumInstrs++;
161+
162+
// Set PC to the actual kernel entry point. Add padding to fill out the rest
163+
// of the backcompat block. The total number of bytes must be 256.
164+
for (unsigned I = 0; I < 64 - NumInstrs; ++I) {
165+
BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_BRANCH))
166+
.addMBB(&*KernelEntryMBB);
167+
}
168+
}
169+
170+
// Find the largest possible load size that fits with SGRP alignment
171+
static LoadConfig getLoadParameters(const TargetRegisterInfo &TRI,
172+
Register KernargPreloadSGPR,
173+
unsigned NumKernargPreloadSGPRs) {
174+
static const LoadConfig Configs[] = {
175+
{8, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM},
176+
{4, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM},
177+
{2, &AMDGPU::SReg_64RegClass, AMDGPU::S_LOAD_DWORDX2_IMM},
178+
{1, &AMDGPU::SReg_32RegClass, AMDGPU::S_LOAD_DWORD_IMM}};
179+
180+
// Find the largest possible load size
181+
for (const auto &Config : Configs) {
182+
if (NumKernargPreloadSGPRs >= Config.Size) {
183+
Register LoadReg = TRI.getMatchingSuperReg(KernargPreloadSGPR,
184+
AMDGPU::sub0, Config.RegClass);
185+
if (LoadReg != AMDGPU::NoRegister)
186+
return LoadConfig(Config.Size, Config.RegClass, Config.Opcode, LoadReg);
187+
}
188+
}
189+
190+
// Fallback to a single register
191+
return LoadConfig(1, &AMDGPU::SReg_32RegClass, AMDGPU::S_LOAD_DWORD_IMM,
192+
KernargPreloadSGPR);
193+
}
194+
195+
// Emit s_load instructions that refill every kernarg-preload user SGPR with
// sequential data from the kernarg segment. Returns the number of loads
// emitted so the caller can account for the prologue size.
unsigned AMDGPUPreloadKernargHeader::addBackCompatLoads(
    MachineBasicBlock *BackCompatMBB, Register KernargSegmentPtr,
    unsigned NumKernargPreloadSGPRs) {
  Register NextSGPR = MFI.getArgInfo().FirstKernArgPreloadReg;
  unsigned ByteOffset = 0;
  unsigned LoadCount = 0;

  // Cover the preload SGPR range with the widest aligned loads available.
  for (unsigned Remaining = NumKernargPreloadSGPRs; Remaining > 0;) {
    LoadConfig Config = getLoadParameters(TRI, NextSGPR, Remaining);

    BuildMI(BackCompatMBB, DebugLoc(), TII.get(Config.Opcode), Config.LoadReg)
        .addReg(KernargSegmentPtr)
        .addImm(ByteOffset)
        .addImm(0);

    ByteOffset += 4 * Config.Size; // each SGPR holds one dword
    NextSGPR = NextSGPR.asMCReg() + Config.Size;
    Remaining -= Config.Size;
    ++LoadCount;
  }

  return LoadCount;
}
221+
222+
// New pass-manager entry point: run the worker and report analysis
// preservation accordingly.
PreservedAnalyses
AMDGPUPreloadKernargHeaderPass::run(MachineFunction &MF,
                                    MachineFunctionAnalysisManager &) {
  bool Changed = AMDGPUPreloadKernargHeader(MF).run();
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
//===- AMDGPUPreloadKernargHeader.h ----------------------------*- C++ -*-===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
9+
#ifndef LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H
10+
#define LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H
11+
12+
#include "llvm/CodeGen/MachinePassManager.h"
13+
14+
namespace llvm {
15+
16+
// New pass-manager wrapper around the kernarg-preload back-compat header
// insertion; the implementation lives in AMDGPUPreloadKernargHeader.cpp.
class AMDGPUPreloadKernargHeaderPass
    : public PassInfoMixin<AMDGPUPreloadKernargHeaderPass> {
public:
  PreservedAnalyses run(MachineFunction &MF,
                        MachineFunctionAnalysisManager &AM);
};
22+
23+
} // end namespace llvm
24+
25+
#endif // LLVM_LIB_TARGET_AMDGPU_PRELOAD_KERNARG_HEADER_H

llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -535,6 +535,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
535535
initializeGCNPreRALongBranchRegPass(*PR);
536536
initializeGCNRewritePartialRegUsesPass(*PR);
537537
initializeGCNRegPressurePrinterPass(*PR);
538+
initializeAMDGPUPreloadKernargHeaderLegacyPass(*PR);
538539
}
539540

540541
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -1658,6 +1659,7 @@ void GCNPassConfig::addPreEmitPass() {
16581659
addPass(&AMDGPUInsertDelayAluID);
16591660

16601661
addPass(&BranchRelaxationPassID);
1662+
addPass(createAMDGPUPreloadKernargHeaderLegacyPass());
16611663
}
16621664

16631665
TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {

llvm/lib/Target/AMDGPU/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@ add_llvm_target(AMDGPUCodeGen
8888
AMDGPUPerfHintAnalysis.cpp
8989
AMDGPUPostLegalizerCombiner.cpp
9090
AMDGPUPreLegalizerCombiner.cpp
91+
AMDGPUPreloadKernargHeader.cpp
9192
AMDGPUPrintfRuntimeBinding.cpp
9293
AMDGPUPromoteAlloca.cpp
9394
AMDGPUPromoteKernelArguments.cpp

llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -338,15 +338,6 @@ bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
338338
return true;
339339
}
340340

341-
bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
342-
const MCSubtargetInfo &STI, bool TrapEnabled) {
343-
OS << (TrapEnabled ? "\ts_trap 2" : "\ts_endpgm")
344-
<< " ; Kernarg preload header. Trap with incompatible firmware that "
345-
"doesn't support preloading kernel arguments.\n";
346-
OS << "\t.fill 63, 4, 0xbf800000 ; s_nop 0\n";
347-
return true;
348-
}
349-
350341
bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
351342
const uint32_t Encoded_s_code_end = 0xbf9f0000;
352343
const uint32_t Encoded_s_nop = 0xbf800000;
@@ -935,20 +926,6 @@ bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
935926
return true;
936927
}
937928

938-
bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
939-
const MCSubtargetInfo &STI, bool TrapEnabled) {
940-
const uint32_t Encoded_s_nop = 0xbf800000;
941-
const uint32_t Encoded_s_trap = 0xbf920002;
942-
const uint32_t Encoded_s_endpgm = 0xbf810000;
943-
const uint32_t TrapInstr = TrapEnabled ? Encoded_s_trap : Encoded_s_endpgm;
944-
MCStreamer &OS = getStreamer();
945-
OS.emitInt32(TrapInstr);
946-
for (int i = 0; i < 63; ++i) {
947-
OS.emitInt32(Encoded_s_nop);
948-
}
949-
return true;
950-
}
951-
952929
bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
953930
const uint32_t Encoded_s_code_end = 0xbf9f0000;
954931
const uint32_t Encoded_s_nop = 0xbf800000;

0 commit comments

Comments
 (0)