[AMDGPU] New image intrinsic optimizer pass #67151
Conversation
Implement a new pass to combine multiple image_load_2dmsaa and 2darraymsaa intrinsic calls into a single image_msaa_load if:
- they refer to the same vaddr except for sample_id,
- they use a constant sample_id and they fall into the same group,
- they have the same dmask, and the number of instructions and the number of vaddr/vdata dword transfers is reduced by the combine.

This should be valid on all GFX11, but a hardware bug renders it unworkable on GFX11.0.*, so it is only enabled for GFX11.5.

Based on a patch by Rodrigo Dominguez!
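As a sketch of the rewrite (adapted from the header comment of the new pass in the diff below; %s and %t are the shared coordinates, %rsrc the image resource, with dmask=1):

; Before: four loads that differ only in the constant sample_id operand.
%v0 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
%v1 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 1, <8 x i32> %rsrc, i32 0, i32 0)
%v2 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 2, <8 x i32> %rsrc, i32 0, i32 0)
%v3 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 3, <8 x i32> %rsrc, i32 0, i32 0)

; After: one msaa_load fetches the whole sample group, and each original
; result becomes an extractelement of the returned vector.
%v = call <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32 1, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
%v0.merged = extractelement <4 x float> %v, i64 0
%v1.merged = extractelement <4 x float> %v, i64 1
%v2.merged = extractelement <4 x float> %v, i64 2
%v3.merged = extractelement <4 x float> %v, i64 3

This cuts four vaddr triples down to one (12 dwords to 3) at the same vdata cost, which is the clear-win first row of the tradeoff table in the pass header.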
@@ -739,6 +739,8 @@
 ; GCN-O2-NEXT: AMDGPU Remove Incompatible Functions
 ; GCN-O2-NEXT: AMDGPU Printf lowering
 ; GCN-O2-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O2-NEXT: FunctionPass Manager
Is it worth moving the pass slightly just to avoid instantiating another function pass manager here?
@llvm/pr-subscribers-backend-amdgpu

Changes

Implement a new pass to combine multiple image_load_2dmsaa and 2darraymsaa intrinsic calls into a single image_msaa_load (full description above).

Patch is 58.49 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/67151.diff

9 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index b7101f401154706..97a413296c55e55 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -49,6 +49,7 @@ FunctionPass *createSIPreAllocateWWMRegsPass();
FunctionPass *createSIFormMemoryClausesPass();
FunctionPass *createSIPostRABundlerPass();
+FunctionPass *createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *);
ModulePass *createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *);
FunctionPass *createAMDGPUCodeGenPreparePass();
FunctionPass *createAMDGPULateCodeGenPreparePass();
@@ -64,6 +65,15 @@ struct AMDGPUSimplifyLibCallsPass : PassInfoMixin<AMDGPUSimplifyLibCallsPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
+struct AMDGPUImageIntrinsicOptimizerPass
+ : PassInfoMixin<AMDGPUImageIntrinsicOptimizerPass> {
+ AMDGPUImageIntrinsicOptimizerPass(TargetMachine &TM) : TM(TM) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+ TargetMachine &TM;
+};
+
struct AMDGPUUseNativeCallsPass : PassInfoMixin<AMDGPUUseNativeCallsPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
@@ -175,6 +185,9 @@ extern char &SIOptimizeExecMaskingID;
void initializeSIPreAllocateWWMRegsPass(PassRegistry &);
extern char &SIPreAllocateWWMRegsID;
+void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &);
+extern char &AMDGPUImageIntrinsicOptimizerID;
+
void initializeAMDGPUPerfHintAnalysisPass(PassRegistry &);
extern char &AMDGPUPerfHintAnalysisID;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index efa1cc0696d2f7c..d5356d1be3d758a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -281,6 +281,12 @@ def FeatureMADIntraFwdBug : SubtargetFeature<"mad-intra-fwd-bug",
"MAD_U64/I64 intra instruction forwarding bug"
>;
+def FeatureMSAALoadDstSelBug : SubtargetFeature<"msaa-load-dst-sel-bug",
+ "HasMSAALoadDstSelBug",
+ "true",
+ "MSAA loads not honoring dst_sel bug"
+>;
+
class SubtargetFeatureLDSBankCount <int Value> : SubtargetFeature <
"ldsbankcount"#Value,
"LDSBankCount",
@@ -1355,7 +1361,8 @@ def FeatureISAVersion11_Common : FeatureSet<
def FeatureISAVersion11_0_Common : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
- [FeatureVALUTransUseHazard])>;
+ [FeatureMSAALoadDstSelBug,
+ FeatureVALUTransUseHazard])>;
def FeatureISAVersion11_0_0 : FeatureSet<
!listconcat(FeatureISAVersion11_0_Common.Features,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
new file mode 100644
index 000000000000000..c392cc4fd1ebebe
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
@@ -0,0 +1,336 @@
+//===- AMDGPUImageIntrinsicOptimizer.cpp ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to combine multiple image_load intrinsics with dim=2dmsaa
+// or dim=2darraymsaa into a single image_msaa_load intrinsic if:
+//
+// - they refer to the same vaddr except for sample_id,
+// - they use a constant sample_id and they fall into the same group,
+// - they have the same dmask and the number of intrinsics and the number of
+// vaddr/vdata dword transfers is reduced by the combine.
+//
+// Examples for the tradeoff (all are assuming 2DMsaa for vaddr):
+//
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | popcount | a16 | d16 | #load | vaddr / | #msaa_load | vaddr / | combine? |
+// | (dmask) | | | | vdata | | vdata | |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | 1 | 0 | 0 | 4 | 12 / 4 | 1 | 3 / 4 | yes |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | 1 | 0 | 0 | 2 | 6 / 2 | 1 | 3 / 4 | yes? |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | 2 | 0 | 0 | 4 | 12 / 8 | 2 | 6 / 8 | yes |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | 2 | 0 | 0 | 2 | 6 / 4 | 2 | 6 / 8 | no |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+// | 1 | 0 | 1 | 2 | 6 / 2 | 1 | 3 / 2 | yes |
+// +----------+-----+-----+-------+---------+------------+---------+----------+
+//
+// Some cases are of questionable benefit, like the one marked with "yes?"
+// above: fewer intrinsics and fewer vaddr and fewer total transfers between SP
+// and TX, but higher vdata. We start by erring on the side of converting these
+// to MSAA_LOAD.
+//
+// This pass will combine intrinsics such as (not necessarily consecutive):
+// call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+// call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 1, <8 x i32> %rsrc, i32 0, i32 0)
+// call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 2, <8 x i32> %rsrc, i32 0, i32 0)
+// call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 3, <8 x i32> %rsrc, i32 0, i32 0)
+// ==>
+// call <4 x float> @llvm.amdgcn.image.msaa.load.2dmsaa.v4f32.i32(i32 1, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
+//
+// Future improvements:
+//
+// - We may occasionally not want to do the combine if it increases the maximum
+// register pressure.
+//
+// - Ensure clausing when multiple MSAA_LOAD are generated.
+//
+// Note: Even though the image_msaa_load intrinsic already exists on gfx10, this
+// combine only applies to gfx11, due to a limitation in gfx10: the gfx10
+// IMAGE_MSAA_LOAD only works correctly with single-channel texture formats, and
+// we don't know the format at compile time.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUInstrInfo.h"
+#include "AMDGPUTargetMachine.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-image-intrinsic-opt"
+
+namespace {
+class AMDGPUImageIntrinsicOptimizer : public FunctionPass {
+ const TargetMachine *TM;
+
+public:
+ static char ID;
+
+ AMDGPUImageIntrinsicOptimizer(const TargetMachine *TM = nullptr)
+ : FunctionPass(ID), TM(TM) {}
+
+ bool runOnFunction(Function &F) override;
+
+}; // End of class AMDGPUImageIntrinsicOptimizer
+} // End anonymous namespace
+
+INITIALIZE_PASS(AMDGPUImageIntrinsicOptimizer, DEBUG_TYPE,
+ "AMDGPU Image Intrinsic Optimizer", false, false)
+
+char AMDGPUImageIntrinsicOptimizer::ID = 0;
+
+void addInstToMergeableList(
+ IntrinsicInst *II, std::list<std::list<IntrinsicInst *>> &MergeableInsts,
+ const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr) {
+ for (std::list<IntrinsicInst *> &IIList : MergeableInsts) {
+ // Check Dim.
+ if (IIList.front()->getIntrinsicID() != II->getIntrinsicID())
+ continue;
+
+ // Check D16.
+ if (IIList.front()->getType() != II->getType())
+ continue;
+
+ // Check DMask.
+ Value *DMaskList = IIList.front()->getArgOperand(ImageDimIntr->DMaskIndex);
+ Value *DMask = II->getArgOperand(ImageDimIntr->DMaskIndex);
+ if (DMaskList != DMask)
+ continue;
+
+ // Check VAddr (except FragId).
+ int I = ImageDimIntr->VAddrStart;
+ for (; I < ImageDimIntr->VAddrEnd - 1; ++I) {
+ if (IIList.front()->getArgOperand(I) != II->getArgOperand(I))
+ break;
+ }
+
+ if (I != ImageDimIntr->VAddrEnd - 1)
+ continue;
+
+ // Check FragId group.
+ const uint8_t FragIdIndex = ImageDimIntr->VAddrEnd - 1;
+ Value *FragIdList = IIList.front()->getArgOperand(FragIdIndex);
+ auto IIListFragId = cast<ConstantInt>(FragIdList);
+ auto IIFragId = cast<ConstantInt>(II->getArgOperand(FragIdIndex));
+ if (IIListFragId->getValue().udiv(4) != IIFragId->getValue().udiv(4))
+ continue;
+
+ // Add to the list.
+ IIList.emplace_back(II);
+ return;
+ }
+
+ // Similar instruction not found, so add a new list.
+ MergeableInsts.emplace_back(1, II);
+ LLVM_DEBUG(dbgs() << "New: " << *II << "\n");
+}
+
+// Collect list of all instructions we know how to merge in a subset of the
+// block. It returns an iterator to the instruction after the last one analyzed.
+BasicBlock::iterator
+collectMergeableInsts(BasicBlock::iterator I, BasicBlock::iterator E,
+ std::list<std::list<IntrinsicInst *>> &MergeableInsts) {
+ for (; I != E; ++I) {
+ // Don't combine if there is a store in the middle or if there is a memory
+ // barrier.
+ if (I->mayHaveSideEffects()) {
+ ++I;
+ break;
+ }
+
+ // Ignore non-intrinsics.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ Intrinsic::ID IntrinID = II->getIntrinsicID();
+
+ // Ignore other intrinsics.
+ if (IntrinID != Intrinsic::amdgcn_image_load_2dmsaa &&
+ IntrinID != Intrinsic::amdgcn_image_load_2darraymsaa)
+ continue;
+
+ // Check for constant FragId.
+ const auto *ImageDimIntr = AMDGPU::getImageDimIntrinsicInfo(IntrinID);
+ const uint8_t FragIdIndex = ImageDimIntr->VAddrEnd - 1;
+ if (!isa<ConstantInt>(II->getArgOperand(FragIdIndex)))
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Merge: " << *II << "\n");
+ addInstToMergeableList(II, MergeableInsts, ImageDimIntr);
+ }
+ }
+
+ return I;
+}
+
+bool optimizeSection(std::list<std::list<IntrinsicInst *>> &MergeableInsts) {
+ bool Modified = false;
+
+ SmallVector<Instruction *, 4> InstrsToErase;
+ for (auto IIList : MergeableInsts) {
+ if (IIList.size() <= 1)
+ continue;
+
+ // Assume the arguments are unchanged and later override them, if needed.
+ SmallVector<Value *, 16> Args(IIList.front()->args());
+
+ // Validate function argument and return types, extracting overloaded
+ // types along the way.
+ SmallVector<Type *, 6> OverloadTys;
+ Function *F = IIList.front()->getCalledFunction();
+ if (!Intrinsic::getIntrinsicSignature(F, OverloadTys))
+ continue;
+
+ Intrinsic::ID IntrinID = IIList.front()->getIntrinsicID();
+ const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
+ AMDGPU::getImageDimIntrinsicInfo(IntrinID);
+
+ Type *EltTy = IIList.front()->getType()->getScalarType();
+ Type *NewTy = FixedVectorType::get(EltTy, 4);
+ OverloadTys[0] = NewTy;
+ bool isD16 = EltTy->isHalfTy();
+
+ ConstantInt *DMask = cast<ConstantInt>(
+ IIList.front()->getArgOperand(ImageDimIntr->DMaskIndex));
+ unsigned DMaskVal = DMask->getZExtValue() & 0xf;
+ unsigned NumElts = popcount(DMaskVal);
+
+ // Number of instructions and the number of vaddr/vdata dword transfers
+ // should be reduced.
+ unsigned NumLoads = IIList.size();
+ unsigned NumMsaas = NumElts;
+ unsigned NumVAddrLoads = 3 * NumLoads;
+ unsigned NumVDataLoads = divideCeil(NumElts, isD16 ? 2 : 1) * NumLoads;
+ unsigned NumVAddrMsaas = 3 * NumMsaas;
+ unsigned NumVDataMsaas = divideCeil(4, isD16 ? 2 : 1) * NumMsaas;
+
+ if (NumLoads < NumMsaas ||
+ (NumVAddrLoads + NumVDataLoads < NumVAddrMsaas + NumVDataMsaas))
+ continue;
+
+ const uint8_t FragIdIndex = ImageDimIntr->VAddrEnd - 1;
+ auto FragId = cast<ConstantInt>(IIList.front()->getArgOperand(FragIdIndex));
+ const APInt &NewFragIdVal = FragId->getValue().udiv(4) * 4;
+
+ // Create the new instructions.
+ IRBuilder<> B(IIList.front());
+
+ // Create the new image_msaa_load intrinsic.
+ SmallVector<Instruction *, 4> NewCalls;
+ while (DMaskVal != 0) {
+ unsigned NewMaskVal = 1 << countr_zero(DMaskVal);
+
+ Intrinsic::ID NewIntrinID;
+ if (IntrinID == Intrinsic::amdgcn_image_load_2dmsaa)
+ NewIntrinID = Intrinsic::amdgcn_image_msaa_load_2dmsaa;
+ else
+ NewIntrinID = Intrinsic::amdgcn_image_msaa_load_2darraymsaa;
+
+ Function *NewIntrin = Intrinsic::getDeclaration(
+ IIList.front()->getModule(), NewIntrinID, OverloadTys);
+ Args[ImageDimIntr->DMaskIndex] =
+ ConstantInt::get(DMask->getType(), NewMaskVal);
+ Args[FragIdIndex] = ConstantInt::get(FragId->getType(), NewFragIdVal);
+ CallInst *NewCall = B.CreateCall(NewIntrin, Args);
+ LLVM_DEBUG(dbgs() << "Optimize: " << *NewCall << "\n");
+
+ NewCalls.push_back(NewCall);
+ DMaskVal -= NewMaskVal;
+ }
+
+ // Create the new extractelement instructions.
+ for (auto &II : IIList) {
+ Value *VecOp = UndefValue::get(II->getType());
+ auto Idx = cast<ConstantInt>(II->getArgOperand(FragIdIndex));
+ if (NumElts == 1) {
+ VecOp = B.CreateExtractElement(NewCalls[0], Idx->getValue().urem(4));
+ LLVM_DEBUG(dbgs() << "Add: " << *VecOp << "\n");
+ } else {
+ for (unsigned I = 0; I < NumElts; ++I) {
+ VecOp = B.CreateInsertElement(
+ VecOp, B.CreateExtractElement(
+ NewCalls[I], Idx->getValue().urem(4)), I);
+ LLVM_DEBUG(dbgs() << "Add: " << *VecOp << "\n");
+ }
+ }
+
+ // Replace the old instruction.
+ II->replaceAllUsesWith(VecOp);
+ InstrsToErase.push_back(II);
+ }
+
+ Modified = true;
+ }
+
+ for (auto I : InstrsToErase) {
+ I->eraseFromParent();
+ }
+
+ MergeableInsts.clear();
+
+ return Modified;
+}
+
+static bool imageIntrinsicOptimizerImpl(Function &F, const TargetMachine *TM) {
+ if (!TM)
+ return false;
+
+ // This optimization only applies to GFX11 and beyond.
+ const GCNSubtarget &ST = TM->getSubtarget<GCNSubtarget>(F);
+ if (!AMDGPU::isGFX11Plus(ST) || ST.hasMSAALoadDstSelBug())
+ return false;
+
+ Module *M = F.getParent();
+
+ // Early test to determine if the intrinsics are used.
+ if (std::none_of(M->begin(), M->end(), [](Function &F) {
+ return !F.users().empty() &&
+ (F.getIntrinsicID() == Intrinsic::amdgcn_image_load_2dmsaa ||
+ F.getIntrinsicID() == Intrinsic::amdgcn_image_load_2darraymsaa);
+ }))
+ return false;
+
+ bool Modified = false;
+ for (auto &BB : F) {
+ BasicBlock::iterator SectionEnd;
+ for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;
+ I = SectionEnd) {
+ std::list<std::list<IntrinsicInst *>> MergeableInsts;
+
+ SectionEnd = collectMergeableInsts(I, E, MergeableInsts);
+ Modified |= optimizeSection(MergeableInsts);
+ }
+ }
+
+ return Modified;
+}
+
+bool AMDGPUImageIntrinsicOptimizer::runOnFunction(Function &F) {
+ if (skipFunction(F))
+ return false;
+
+ return imageIntrinsicOptimizerImpl(F, TM);
+}
+
+FunctionPass *
+llvm::createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *TM) {
+ return new AMDGPUImageIntrinsicOptimizer(TM);
+}
+
+PreservedAnalyses
+AMDGPUImageIntrinsicOptimizerPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+
+ bool Changed = imageIntrinsicOptimizerImpl(F, &TM);
+ return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 481fbaf1543a4ea..bcbc03eb2559c4f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -333,6 +333,11 @@ static cl::opt<bool> EnablePromoteKernelArguments(
cl::desc("Enable promotion of flat kernel pointer arguments to global"),
cl::Hidden, cl::init(true));
+static cl::opt<bool> EnableImageIntrinsicOptimizer(
+ "amdgpu-enable-image-intrinsic-optimizer",
+ cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
+ cl::Hidden);
+
static cl::opt<bool> EnableMaxIlpSchedStrategy(
"amdgpu-enable-max-ilp-scheduling-strategy",
cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
@@ -410,6 +415,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
initializeAMDGPUAAWrapperPassPass(*PR);
initializeAMDGPUExternalAAWrapperPass(*PR);
+ initializeAMDGPUImageIntrinsicOptimizerPass(*PR);
initializeAMDGPUPrintfRuntimeBindingPass(*PR);
initializeAMDGPUResourceUsageAnalysisPass(*PR);
initializeGCNNSAReassignPass(*PR);
@@ -626,6 +632,10 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
PM.addPass(AMDGPUSimplifyLibCallsPass());
return true;
}
+ if (PassName == "amdgpu-image-intrinsic-opt") {
+ PM.addPass(AMDGPUImageIntrinsicOptimizerPass(*this));
+ return true;
+ }
if (PassName == "amdgpu-usenative") {
PM.addPass(AMDGPUUseNativeCallsPass());
return true;
@@ -980,6 +990,9 @@ void AMDGPUPassConfig::addIRPasses() {
if (LowerCtorDtor)
addPass(createAMDGPUCtorDtorLoweringLegacyPass());
+ if (isPassEnabled(EnableImageIntrinsicOptimizer))
+ addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
+
// Function calls are not supported, so make sure we inline everything.
addPass(createAMDGPUAlwaysInlinePass());
addPass(createAlwaysInlinerLegacyPass());
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 0922e8d99deb3aa..8124fdd5ddfefec 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -66,6 +66,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPULateCodeGenPrepare.cpp
AMDGPULegalizerInfo.cpp
AMDGPULibCalls.cpp
+ AMDGPUImageIntrinsicOptimizer.cpp
AMDGPULibFunc.cpp
AMDGPULowerKernelArguments.cpp
AMDGPULowerKernelAttributes.cpp
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 970ce48de9f47c2..744eb50aaebd36a 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -206,6 +206,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool HasFlatSegmentOffsetBug = false;
bool HasImageStoreD16Bug = false;
bool HasImageGather4D16Bug = false;
+ bool HasMSAALoadDstSelBug = false;
bool HasGFX11FullVGPRs = false;
bool HasMADIntraFwdBug = false;
bool HasVOPDInsts = false;
@@ -954,6 +955,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool hasMADIntraFwdBug() const { return HasMADIntraFwdBug; }
+ bool hasMSAALoadDstSelBug() const { return HasMSAALoadDstSelBug; }
+
bool hasNSAEncoding() const { return HasNSAEncoding; }
bool hasPartialNSAEncoding() const { return HasPartialNSAEncoding; }
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 84f67b3faac3c07..b939c8d2e339de4 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -739,6 +739,8 @@
; GCN-O2-NEXT: AMDGPU Remove Incompatible Functions
; GCN-O2-NEXT: AMDGPU Printf lowering
; GCN-O2-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O2-NEXT: FunctionPass Manager
+; GCN-O2-NEXT: AMDGPU Image Intrinsic Optimizer
; GCN-O2-NEXT: AMDGPU Inline All Functions
; GCN-O2-NEXT: Inliner for always_inline functions
; GCN-O2-NEXT: FunctionPass Manager
@@ -1043,6 +1045,8 @@
; GCN-O3-NEXT: AMDGPU Remove Incompatible Functions
; GCN-O3-NEXT: AMDGPU Printf lowering
; GCN-O3-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O3-NEXT: FunctionPass Manager
+; GCN-O3-NEXT: AMDGPU Image Intrinsic Optimizer
; GCN-O3-NEXT: AMDGPU Inline All Functions
; GCN-O3-NEXT: Inliner for always_inline functions
; GCN-O3-NEXT: FunctionPass Manager
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.2dmsaa.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.load.2dmsaa.ll
new file mode 100644
index 000000000000000..45afac52a6a5ceb
--- /dev/n...
[truncated]
✅ With the latest revision this PR passed the C/C++ code formatter.
LGTM! Thanks!
Implement a new pass to combine multiple image_load_2dmsaa and
2darraymsaa intrinsic calls into a single image_msaa_load if:
- they refer to the same vaddr except for sample_id,
- they use a constant sample_id and they fall into the same group,
- they have the same dmask, and the number of instructions and the
  number of vaddr/vdata dword transfers is reduced by the combine.

This should be valid on all GFX11 but a hardware bug renders it
unworkable on GFX11.0.* so it is only enabled for GFX11.5.

Based on a patch by Rodrigo Dominguez!
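For anyone who wants to exercise the pass in isolation, here is a minimal sketch. It assumes a GFX11.5 target such as gfx1150 (the combine is disabled wherever msaa-load-dst-sel-bug is set, i.e. on GFX11.0.*); the pass name amdgpu-image-intrinsic-opt comes from the registerPassBuilderCallbacks hunk above, but the RUN line and test function are illustrative, not taken from the PR's test files:

; RUN: opt -mtriple=amdgcn -mcpu=gfx1150 -passes=amdgpu-image-intrinsic-opt -S %s

declare float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32, i32, i32, i32, <8 x i32>, i32, i32)

define amdgpu_ps float @merge_two_samples(<8 x i32> inreg %rsrc, i32 %s, i32 %t) {
  ; Two loads with identical vaddr, dmask=1, and constant sample_ids 0 and 1
  ; (same group of four), so the pass should rewrite them into a single
  ; image_msaa_load plus two extractelements.
  %v0 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 0, <8 x i32> %rsrc, i32 0, i32 0)
  %v1 = call float @llvm.amdgcn.image.load.2dmsaa.f32.i32(i32 1, i32 %s, i32 %t, i32 1, <8 x i32> %rsrc, i32 0, i32 0)
  %add = fadd float %v0, %v1
  ret float %add
}

In the normal codegen pipeline the pass runs by default and can be toggled with the hidden -amdgpu-enable-image-intrinsic-optimizer flag added in AMDGPUTargetMachine.cpp.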