Skip to content

[AMDGPU] Add backward compatibility layer for kernarg preloading #119167

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jan 10, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions llvm/docs/AMDGPUUsage.rst
Original file line number Diff line number Diff line change
Expand Up @@ -5914,10 +5914,7 @@ additional 256 bytes to the kernel_code_entry_byte_offset. This addition
facilitates the incorporation of a prologue to the kernel entry to handle cases
where code designed for kernarg preloading is executed on hardware equipped with
incompatible firmware. If hardware has compatible firmware the 256 bytes at the
start of the kernel entry will be skipped. Additionally, the compiler backend
may insert a trap instruction at the start of the kernel prologue to manage
situations where kernarg preloading is attempted on hardware with incompatible
firmware.
start of the kernel entry will be skipped.

With code object V5 and later, hidden kernel arguments that are normally
accessed through the Implicit Argument Ptr, may be preloaded into User SGPRs.
Expand Down
4 changes: 4 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPU.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM = nullptr);
ModulePass *createAMDGPULowerBufferFatPointersPass();
FunctionPass *createSIModeRegisterPass();
FunctionPass *createGCNPreRAOptimizationsPass();
FunctionPass *createAMDGPUPreloadKernArgPrologLegacyPass();

struct AMDGPUSimplifyLibCallsPass : PassInfoMixin<AMDGPUSimplifyLibCallsPass> {
AMDGPUSimplifyLibCallsPass() {}
Expand Down Expand Up @@ -230,6 +231,9 @@ extern char &AMDGPUPerfHintAnalysisLegacyID;
void initializeGCNRegPressurePrinterPass(PassRegistry &);
extern char &GCNRegPressurePrinterID;

void initializeAMDGPUPreloadKernArgPrologLegacyPass(PassRegistry &);
extern char &AMDGPUPreloadKernArgPrologLegacyID;

// Passes common to R600 and SI
FunctionPass *createAMDGPUPromoteAlloca();
void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
Expand Down
3 changes: 3 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUARGUMENTUSAGEINFO_H

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Pass.h"
Expand Down Expand Up @@ -161,6 +162,8 @@ struct AMDGPUFunctionArgInfo {

// Map the index of preloaded kernel arguments to its descriptor.
SmallDenseMap<int, KernArgPreloadDescriptor> PreloadKernArgs{};
// The first user SGPR allocated for kernarg preloading.
Register FirstKernArgPreloadReg;

std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
getPreloadedValue(PreloadedValue Value) const;
Expand Down
6 changes: 0 additions & 6 deletions llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -207,12 +207,6 @@ void AMDGPUAsmPrinter::emitFunctionBodyStart() {

if (STM.isAmdHsaOS())
HSAMetadataStream->emitKernel(*MF, CurrentProgramInfo);

if (MFI.getNumKernargPreloadedSGPRs() > 0) {
assert(AMDGPU::hasKernargPreload(STM));
getTargetStreamer()->EmitKernargPreloadHeader(*getGlobalSTI(),
STM.isAmdHsaOS());
}
}

void AMDGPUAsmPrinter::emitFunctionBodyEnd() {
Expand Down
211 changes: 211 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,211 @@
//===- AMDGPUPreloadKernArgProlog.cpp - Preload KernArg Prolog ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass creates a backward compatibility layer for kernel argument
/// preloading in situations where code is compiled with kernel argument
/// preloading enabled but executed on hardware without firmware support for it.
///
/// To avoid recompilation, the pass inserts a block at the beginning of the
/// program that loads the kernel arguments into SGPRs using s_load
/// instructions. This sets up the registers exactly as they would be on systems
/// with compatible firmware.
///
/// This effectively creates two entry points for the kernel. Firmware that
/// supports the feature will automatically jump past the first 256 bytes of the
/// program, skipping the compatibility layer and directly starting execution on
/// the optimized code path.
///
/// This pass should be run as late as possible to prevent any optimizations
/// that might assume the padding is dead code or that the added prologue is a
/// true predecessor of the kernel entry block.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUPreloadKernArgProlog.h"
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/TargetParser/TargetParser.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-preload-kern-arg-prolog"

namespace {

// Used to build s_loads mapping user SGPRs to kernel arguments.
// Describes one scalar load: how many dwords it fills, which register class
// the destination tuple belongs to, and the exact opcode to emit.
struct LoadConfig {
  unsigned Size;                       // Number of dwords (SGPRs) loaded.
  const TargetRegisterClass *RegClass; // Register class of the destination.
  unsigned Opcode;                     // S_LOAD_DWORD[X*]_IMM opcode to use.
  Register LoadReg = Register();       // Concrete destination register, filled
                                       // in once a matching tuple is found.
};

/// Implementation of the kernarg-preload backward-compatibility prolog.
/// Bundles the per-function state (subtarget, function info, instr/reg info)
/// needed to build the alternate kernel entry block.
class AMDGPUPreloadKernArgProlog {
public:
  AMDGPUPreloadKernArgProlog(MachineFunction &MF);

  /// Build the compatibility prolog if this function preloads kernel
  /// arguments on a subtarget with kernarg-preload support.
  /// \returns true if the MIR was modified.
  bool run();

private:
  MachineFunction &MF;
  const GCNSubtarget &ST;
  const SIMachineFunctionInfo &MFI;
  const SIInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Create a new block before the entry point to the kernel. Firmware that
  // supports preloading kernel arguments will automatically jump past this
  // block to the alternative kernel entry point.
  void createBackCompatBlock(unsigned NumKernArgPreloadSGPRs);

  // Add instructions to load kernel arguments into SGPRs.
  void addBackCompatLoads(MachineBasicBlock *BackCompatMBB,
                          Register KernArgSegmentPtr,
                          unsigned NumKernArgPreloadSGPRs);
};

/// Legacy-pass-manager wrapper around AMDGPUPreloadKernArgProlog.
class AMDGPUPreloadKernArgPrologLegacy : public MachineFunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid.

  AMDGPUPreloadKernArgPrologLegacy() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "AMDGPU Preload Kernel Arguments Prolog";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end anonymous namespace

char AMDGPUPreloadKernArgPrologLegacy::ID = 0;

INITIALIZE_PASS(AMDGPUPreloadKernArgPrologLegacy, DEBUG_TYPE,
                "AMDGPU Preload Kernel Arguments Prolog", false, false)

// Public pass ID so other code can reference/schedule this pass by address.
char &llvm::AMDGPUPreloadKernArgPrologLegacyID =
    AMDGPUPreloadKernArgPrologLegacy::ID;

// Factory used by the AMDGPU target machine when building the codegen
// pipeline (added in addPreEmitPass, after branch relaxation).
FunctionPass *llvm::createAMDGPUPreloadKernArgPrologLegacyPass() {
  return new AMDGPUPreloadKernArgPrologLegacy();
}

// Legacy PM entry point: delegate to the shared implementation class.
bool AMDGPUPreloadKernArgPrologLegacy::runOnMachineFunction(
    MachineFunction &MF) {
  AMDGPUPreloadKernArgProlog Impl(MF);
  return Impl.run();
}

// Cache the subtarget, machine function info, and instruction/register info
// used throughout the pass.
AMDGPUPreloadKernArgProlog::AMDGPUPreloadKernArgProlog(MachineFunction &MF)
    : MF(MF), ST(MF.getSubtarget<GCNSubtarget>()),
      MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()) {}

bool AMDGPUPreloadKernArgProlog::run() {
if (!ST.hasKernargPreload())
return false;

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Didn't you need some other feature for whether this is necessary?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No reason to differentiate right now unless some target didn't need any padding.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought gfx950 didn't need the compatibility?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it still needs it because of the generic subtarget, and AFAIK there are no planned FW updates to facilitate removal of the compatibility layer.

unsigned NumKernArgPreloadSGPRs = MFI.getNumKernargPreloadedSGPRs();
if (!NumKernArgPreloadSGPRs)
return false;

createBackCompatBlock(NumKernArgPreloadSGPRs);
return true;
}

// Build the compatibility entry block ahead of the real kernel entry.
// Firmware that supports kernarg preloading jumps past the first 256 bytes
// of the program (see the file header), so hardware with compatible firmware
// never executes this block; older firmware falls through it and gets the
// preloaded SGPRs populated via explicit scalar loads instead.
void AMDGPUPreloadKernArgProlog::createBackCompatBlock(
    unsigned NumKernArgPreloadSGPRs) {
  auto KernelEntryMBB = MF.begin();
  MachineBasicBlock *BackCompatMBB = MF.CreateMachineBasicBlock();
  MF.insert(KernelEntryMBB, BackCompatMBB);

  assert(MFI.getUserSGPRInfo().hasKernargSegmentPtr() &&
         "Kernel argument segment pointer register not set.");
  Register KernArgSegmentPtr = MFI.getArgInfo().KernargSegmentPtr.getRegister();
  BackCompatMBB->addLiveIn(KernArgSegmentPtr);

  // Load kernel arguments to SGPRs
  addBackCompatLoads(BackCompatMBB, KernArgSegmentPtr, NumKernArgPreloadSGPRs);

  // Wait for loads to complete. encodeWaitcnt takes (vmcnt, expcnt, lgkmcnt);
  // vmcnt/expcnt are set to their full bit masks (no wait) and lgkmcnt to 0
  // so we only wait on the scalar memory loads emitted above.
  AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
  unsigned Waitcnt =
      AMDGPU::encodeWaitcnt(IV, getVmcntBitMask(IV), getExpcntBitMask(IV), 0);
  BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_WAITCNT))
      .addImm(Waitcnt);

  // Branch to kernel start
  BuildMI(BackCompatMBB, DebugLoc(), TII.get(AMDGPU::S_BRANCH))
      .addMBB(&*KernelEntryMBB);
  BackCompatMBB->addSuccessor(&*KernelEntryMBB);

  // Create a new basic block for padding to 256 bytes: aligning this empty
  // block to 256 forces the original kernel entry to start at the offset the
  // compatible firmware skips to.
  MachineBasicBlock *PadMBB = MF.CreateMachineBasicBlock();
  MF.insert(++BackCompatMBB->getIterator(), PadMBB);
  PadMBB->setAlignment(Align(256));
  PadMBB->addSuccessor(&*KernelEntryMBB);
}

/// Find the largest possible load size that fits with SGPR alignment
/// Pick the widest legal s_load for the SGPRs still to be filled.
///
/// Walks the candidate widths from largest (8 dwords) to smallest (2 dwords)
/// and returns the first one that both fits in \p NumKernArgPreloadSGPRs and
/// has a matching aligned super-register rooted at \p KernArgPreloadSGPR.
/// Falls back to a single-dword load when no wider tuple is available.
static LoadConfig getLoadParameters(const TargetRegisterInfo &TRI,
                                    Register KernArgPreloadSGPR,
                                    unsigned NumKernArgPreloadSGPRs) {
  static constexpr LoadConfig Configs[] = {
      {8, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM},
      {4, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM},
      {2, &AMDGPU::SReg_64RegClass, AMDGPU::S_LOAD_DWORDX2_IMM}};

  for (const LoadConfig &Candidate : Configs) {
    if (NumKernArgPreloadSGPRs < Candidate.Size)
      continue;
    // Require a super-register of the right class whose sub0 is the first
    // preload SGPR; this enforces the alignment the wide load needs.
    Register WideReg = TRI.getMatchingSuperReg(KernArgPreloadSGPR,
                                               AMDGPU::sub0,
                                               Candidate.RegClass);
    if (!WideReg)
      continue;
    LoadConfig Chosen = Candidate;
    Chosen.LoadReg = WideReg;
    return Chosen;
  }

  // No aligned tuple fits — load one dword at a time.
  return LoadConfig{1, &AMDGPU::SReg_32RegClass, AMDGPU::S_LOAD_DWORD_IMM,
                    KernArgPreloadSGPR};
}

// Emit the scalar loads that reproduce the preloaded-SGPR state: starting at
// the first kernarg-preload user SGPR, load sequential dwords from the
// kernarg segment until every preload SGPR is covered.
void AMDGPUPreloadKernArgProlog::addBackCompatLoads(
    MachineBasicBlock *BackCompatMBB, Register KernArgSegmentPtr,
    unsigned NumKernArgPreloadSGPRs) {
  Register KernArgPreloadSGPR = MFI.getArgInfo().FirstKernArgPreloadReg;
  unsigned Offset = 0;
  // Fill all user SGPRs used for kernarg preloading with sequential data from
  // the kernarg segment
  while (NumKernArgPreloadSGPRs > 0) {
    // Pick the widest aligned load that still fits in the remaining SGPRs.
    LoadConfig Config =
        getLoadParameters(TRI, KernArgPreloadSGPR, NumKernArgPreloadSGPRs);

    BuildMI(BackCompatMBB, DebugLoc(), TII.get(Config.Opcode), Config.LoadReg)
        .addReg(KernArgSegmentPtr)
        .addImm(Offset)
        .addImm(0); // Trailing immediate left at 0 (presumably the cache
                    // policy operand of S_LOAD_*_IMM — confirm in SIInstrInfo).

    // Advance by the dwords just loaded: 4 bytes each in the kernarg segment,
    // one SGPR each in the register file.
    Offset += 4 * Config.Size;
    KernArgPreloadSGPR = KernArgPreloadSGPR.asMCReg() + Config.Size;
    NumKernArgPreloadSGPRs -= Config.Size;
  }
}

// New-pass-manager entry point: delegate to the shared implementation and
// report whether any analyses may have been invalidated.
PreservedAnalyses
AMDGPUPreloadKernArgPrologPass::run(MachineFunction &MF,
                                    MachineFunctionAnalysisManager &) {
  bool Changed = AMDGPUPreloadKernArgProlog(MF).run();
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
25 changes: 25 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUPreloadKernArgProlog.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
//===- AMDGPUPreloadKernArgProlog.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file New-pass-manager wrapper for the AMDGPU kernarg-preload backward
/// compatibility prolog. See AMDGPUPreloadKernArgProlog.cpp for details.
//
//===----------------------------------------------------------------------===//

// Banner fixed to match the actual filename (was "AMDGPUPreloadKernargProlog.h"
// with a lowercase 'a'); guard renamed to LLVM's full-path convention.
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUPRELOADKERNARGPROLOG_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUPRELOADKERNARGPROLOG_H

#include "llvm/CodeGen/MachinePassManager.h"

namespace llvm {

/// Inserts a prolog that emulates kernarg preloading with explicit scalar
/// loads, for firmware that lacks kernarg-preload support.
class AMDGPUPreloadKernArgPrologPass
    : public PassInfoMixin<AMDGPUPreloadKernArgPrologPass> {
public:
  PreservedAnalyses run(MachineFunction &MF,
                        MachineFunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUPRELOADKERNARGPROLOG_H
2 changes: 2 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -540,6 +540,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeGCNPreRALongBranchRegPass(*PR);
initializeGCNRewritePartialRegUsesPass(*PR);
initializeGCNRegPressurePrinterPass(*PR);
initializeAMDGPUPreloadKernArgPrologLegacyPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
Expand Down Expand Up @@ -1669,6 +1670,7 @@ void GCNPassConfig::addPreEmitPass() {
addPass(&AMDGPUInsertDelayAluID);

addPass(&BranchRelaxationPassID);
addPass(createAMDGPUPreloadKernArgPrologLegacyPass());
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
Expand Down
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUPerfHintAnalysis.cpp
AMDGPUPostLegalizerCombiner.cpp
AMDGPUPreLegalizerCombiner.cpp
AMDGPUPreloadKernArgProlog.cpp
AMDGPUPrintfRuntimeBinding.cpp
AMDGPUPromoteAlloca.cpp
AMDGPUPromoteKernelArguments.cpp
Expand Down
23 changes: 0 additions & 23 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -338,15 +338,6 @@ bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
return true;
}

bool AMDGPUTargetAsmStreamer::EmitKernargPreloadHeader(
const MCSubtargetInfo &STI, bool TrapEnabled) {
OS << (TrapEnabled ? "\ts_trap 2" : "\ts_endpgm")
<< " ; Kernarg preload header. Trap with incompatible firmware that "
"doesn't support preloading kernel arguments.\n";
OS << "\t.fill 63, 4, 0xbf800000 ; s_nop 0\n";
return true;
}

bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
const uint32_t Encoded_s_code_end = 0xbf9f0000;
const uint32_t Encoded_s_nop = 0xbf800000;
Expand Down Expand Up @@ -935,20 +926,6 @@ bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
return true;
}

bool AMDGPUTargetELFStreamer::EmitKernargPreloadHeader(
const MCSubtargetInfo &STI, bool TrapEnabled) {
const uint32_t Encoded_s_nop = 0xbf800000;
const uint32_t Encoded_s_trap = 0xbf920002;
const uint32_t Encoded_s_endpgm = 0xbf810000;
const uint32_t TrapInstr = TrapEnabled ? Encoded_s_trap : Encoded_s_endpgm;
MCStreamer &OS = getStreamer();
OS.emitInt32(TrapInstr);
for (int i = 0; i < 63; ++i) {
OS.emitInt32(Encoded_s_nop);
}
return true;
}

bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
const uint32_t Encoded_s_code_end = 0xbf9f0000;
const uint32_t Encoded_s_nop = 0xbf800000;
Expand Down
14 changes: 0 additions & 14 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,12 +96,6 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {
/// \returns True on success, false on failure.
virtual bool EmitCodeEnd(const MCSubtargetInfo &STI) { return true; }

/// \returns True on success, false on failure.
virtual bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
bool TrapEnabled) {
return true;
}

virtual void
EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
const AMDGPU::MCKernelDescriptor &KernelDescriptor,
Expand Down Expand Up @@ -168,10 +162,6 @@ class AMDGPUTargetAsmStreamer final : public AMDGPUTargetStreamer {
/// \returns True on success, false on failure.
bool EmitCodeEnd(const MCSubtargetInfo &STI) override;

/// \returns True on success, false on failure.
bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
bool TrapEnabled) override;

void
EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
const AMDGPU::MCKernelDescriptor &KernelDescriptor,
Expand Down Expand Up @@ -225,10 +215,6 @@ class AMDGPUTargetELFStreamer final : public AMDGPUTargetStreamer {
/// \returns True on success, false on failure.
bool EmitCodeEnd(const MCSubtargetInfo &STI) override;

/// \returns True on success, false on failure.
bool EmitKernargPreloadHeader(const MCSubtargetInfo &STI,
bool TrapEnabled) override;

void
EmitAmdhsaKernelDescriptor(const MCSubtargetInfo &STI, StringRef KernelName,
const AMDGPU::MCKernelDescriptor &KernelDescriptor,
Expand Down
Loading
Loading