-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[AMDGPU] Handle invariant marks in AMDGPUPromoteAllocaPass
#124607
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
@llvm/pr-subscribers-backend-amdgpu Author: Shilei Tian (shiltian) — Changes. Full diff: https://github.com/llvm/llvm-project/pull/124607.diff — 2 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 907f82ed7fc528..ee812cf4a81a02 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -1556,12 +1556,34 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
- case Intrinsic::strip_invariant_group:
+ case Intrinsic::strip_invariant_group: {
+ SmallVector<Type *> ArgTy;
+ SmallVector<Value *> Args;
+ if (Intr->getIntrinsicID() == Intrinsic::invariant_start) {
+ Value *Size = Intr->getArgOperand(0);
+ ArgTy.emplace_back(Offset->getType());
+ Args.emplace_back(Size);
+ Args.emplace_back(Offset);
+ } else if (Intr->getIntrinsicID() == Intrinsic::invariant_end) {
+ Value *InvarientPtr = Intr->getArgOperand(0);
+ Value *Size = Intr->getArgOperand(1);
+ ArgTy.emplace_back(Offset->getType());
+ Args.emplace_back(InvarientPtr);
+ Args.emplace_back(Size);
+ Args.emplace_back(Offset);
+ } else {
+ ArgTy.emplace_back(Offset->getType());
+ Args.emplace_back(Offset);
+ }
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ Intr->getModule(), Intr->getIntrinsicID(), ArgTy);
+ CallInst *NewIntr =
+ CallInst::Create(F, Args, Intr->getName(), Intr->getIterator());
+ Intr->mutateType(NewIntr->getType());
+ Intr->replaceAllUsesWith(NewIntr);
Intr->eraseFromParent();
- // FIXME: I think the invariant marker should still theoretically apply,
- // but the intrinsics need to be changed to accept pointers with any
- // address space.
continue;
+ }
case Intrinsic::objectsize: {
Value *Src = Intr->getOperand(0);
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-marks.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-marks.ll
new file mode 100644
index 00000000000000..fca4be5e76daea
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-marks.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca %s -o - | FileCheck %s
+
+declare ptr @llvm.invariant.start.p5(i64, ptr addrspace(5) nocapture)
+declare void @llvm.invariant.end.p5(ptr, i64, ptr addrspace(5) nocapture)
+declare ptr addrspace(5) @llvm.launder.invariant.group.p5(ptr addrspace(5))
+declare ptr addrspace(5) @llvm.strip.invariant.group.p5(ptr addrspace(5))
+
+define amdgpu_kernel void @use_invariant_start_and_end() {
+; CHECK-LABEL: define amdgpu_kernel void @use_invariant_start_and_end() {
+; CHECK-NEXT: [[BB:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call noalias nonnull dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !invariant.load [[META0:![0-9]+]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(4) [[TMP3]], align 4, !range [[RNG1:![0-9]+]], !invariant.load [[META0]]
+; CHECK-NEXT: [[TMP5:%.*]] = lshr i32 [[TMP2]], 16
+; CHECK-NEXT: [[TMP6:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[TMP7:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT: [[TMP8:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw nsw i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP4]]
+; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[TMP12]], [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr addrspace(3) @use_invariant_start_and_end.alloca, i32 0, i32 [[TMP13]]
+; CHECK-NEXT: [[INVARIENT1:%.*]] = call ptr @llvm.invariant.start.p3(i64 0, ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT: store <2 x i1> zeroinitializer, ptr [[INVARIENT1]], align 1
+; CHECK-NEXT: call void @llvm.invariant.end.p3(ptr [[INVARIENT1]], i64 0, ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT: ret void
+;
+bb:
+ %alloca = alloca i32, align 4, addrspace(5)
+ %invarient = call ptr @llvm.invariant.start.p5(i64 0, ptr addrspace(5) %alloca)
+ store <2 x i1> zeroinitializer, ptr %invarient, align 1
+ call void @llvm.invariant.end.p5(ptr %invarient, i64 0, ptr addrspace(5) %alloca)
+ ret void
+}
+
+define amdgpu_kernel void @use_invariant_group_and_strip() {
+; CHECK-LABEL: define amdgpu_kernel void @use_invariant_group_and_strip() {
+; CHECK-NEXT: [[BB:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call noalias nonnull dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !invariant.load [[META0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(4) [[TMP3]], align 4, !range [[RNG1]], !invariant.load [[META0]]
+; CHECK-NEXT: [[TMP5:%.*]] = lshr i32 [[TMP2]], 16
+; CHECK-NEXT: [[TMP6:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: [[TMP7:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT: [[TMP8:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw nsw i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP4]]
+; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[TMP12]], [[TMP8]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr addrspace(3) @use_invariant_group_and_strip.alloca, i32 0, i32 [[TMP13]]
+; CHECK-NEXT: [[INVARIENT2:%.*]] = call ptr addrspace(3) @llvm.launder.invariant.group.p3(ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT: store <2 x i1> zeroinitializer, ptr addrspace(3) [[INVARIENT2]], align 1
+; CHECK-NEXT: [[STRIP1:%.*]] = call ptr addrspace(3) @llvm.strip.invariant.group.p3(ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT: store <2 x i1> zeroinitializer, ptr addrspace(3) [[STRIP1]], align 1
+; CHECK-NEXT: ret void
+;
+bb:
+ %alloca = alloca i32, align 4, addrspace(5)
+ %invarient = call ptr addrspace(5) @llvm.launder.invariant.group.p5(ptr addrspace(5) %alloca)
+ store <2 x i1> zeroinitializer, ptr addrspace(5) %invarient, align 1
+ %strip = call ptr addrspace(5) @llvm.strip.invariant.group.p5(ptr addrspace(5) %alloca)
+ store <2 x i1> zeroinitializer, ptr addrspace(5) %strip, align 1
+ ret void
+}
+;.
+; CHECK: [[META0]] = !{}
+; CHECK: [[RNG1]] = !{i32 0, i32 1025}
+;.
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
5f17deb
to
5e48dd7
Compare
; | ||
bb: | ||
%alloca = alloca i32, align 4, addrspace(5) | ||
%invarient = call ptr @llvm.invariant.start.p5(i64 0, ptr addrspace(5) %alloca) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Typo invarient
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
oops, my poor English…
%alloca = alloca i32, align 4, addrspace(5) | ||
%invarient = call ptr @llvm.invariant.start.p5(i64 0, ptr addrspace(5) %alloca) | ||
store <2 x i1> zeroinitializer, ptr %invarient, align 1 | ||
call void @llvm.invariant.end.p5(ptr %invarient, i64 0, ptr addrspace(5) %alloca) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why is the return type an addrspace(0) pointer? Should this really be using a token?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why is the return type an addrspace(0) pointer?
That is based on the LangRef.
Should this really be using a token?
I'm not sure what you meant by using a token.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I mean the token IR type. What is this pointer value used for
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm not very sure actually, but based on llvm/test/Transforms/RewriteStatepointsForGC/drop-invalid-metadata.ll, it seems feasible.
%invst = call ptr @llvm.invariant.start.p1(i64 1, ptr addrspace(1) %v1)
...
%foo = call i32 @escaping.invariant.start(ptr %invst)
ArgTy.emplace_back(Offset->getType()); | ||
Args.emplace_back(Size); | ||
Args.emplace_back(Offset); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Assign from initializer list?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sure, will do a follow up.
Args.emplace_back(Size); | ||
Args.emplace_back(Offset); | ||
} else { | ||
ArgTy.emplace_back(Offset->getType()); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is common to all cases, you could initialize the vectors with it
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sure, will do a follow up.
Fixes SWDEV-509327.