-
Notifications
You must be signed in to change notification settings - Fork 14.3k
InferAddressSpaces: Handle llvm.fake.use #109567
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
InferAddressSpaces: Handle llvm.fake.use #109567
Conversation
This stack of pull requests is managed by Graphite. Learn more about stacking. |
@llvm/pr-subscribers-llvm-transforms Author: Matt Arsenault (arsenm). Changes — Full diff: https://github.com/llvm/llvm-project/pull/109567.diff (2 Files Affected):
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 566cdc51f6e74a..60fd2a286119b3 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -414,6 +414,10 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
II->setCalledFunction(NewDecl);
return true;
}
+ case Intrinsic::fake_use: {
+ II->replaceUsesOfWith(OldV, NewV);
+ return true;
+ }
default: {
Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
if (!Rewrite)
@@ -455,6 +459,16 @@ void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(1),
PostorderStack, Visited);
break;
+ case Intrinsic::fake_use: {
+ for (Value *Op : II->operands()) {
+ if (Op->getType()->isPtrOrPtrVectorTy()) {
+ appendsFlatAddressExpressionToPostorderStack(Op, PostorderStack,
+ Visited);
+ }
+ }
+
+ break;
+ }
default:
SmallVector<int, 2> OpIndexes;
if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
diff --git a/llvm/test/Transforms/InferAddressSpaces/fake-use.ll b/llvm/test/Transforms/InferAddressSpaces/fake-use.ll
new file mode 100644
index 00000000000000..ad7f621dc40e85
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/fake-use.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=infer-address-spaces -assume-default-is-flat-addrspace %s | FileCheck %s
+
+declare void @llvm.fake.use(...)
+
+@gv = internal addrspace(3) global float 0.0, align 4
+
+define void @one_fake_use(ptr addrspace(1) %global.ptr) {
+; CHECK-LABEL: define void @one_fake_use(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GLOBAL_PTR]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast ptr addrspace(1) %global.ptr to ptr
+ call void (...) @llvm.fake.use(ptr %cast0)
+ ret void
+}
+
+define void @one_fake_use_repeat_operands(ptr addrspace(1) %global.ptr) {
+; CHECK-LABEL: define void @one_fake_use_repeat_operands(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GLOBAL_PTR]], ptr addrspace(1) [[GLOBAL_PTR]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast ptr addrspace(1) %global.ptr to ptr
+ call void (...) @llvm.fake.use(ptr %cast0, ptr %cast0)
+ ret void
+}
+
+define void @one_fake_use_refers_original_ptr(ptr addrspace(1) %global.ptr) {
+; CHECK-LABEL: define void @one_fake_use_refers_original_ptr(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GLOBAL_PTR]], ptr addrspace(1) [[GLOBAL_PTR]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast ptr addrspace(1) %global.ptr to ptr
+ call void (...) @llvm.fake.use(ptr %cast0, ptr addrspace(1) %global.ptr)
+ ret void
+}
+
+define void @multiple_inferrable_fake_use(ptr addrspace(1) %global.ptr0, ptr addrspace(1) %global.ptr1) {
+; CHECK-LABEL: define void @multiple_inferrable_fake_use(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR0:%.*]], ptr addrspace(1) [[GLOBAL_PTR1:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GLOBAL_PTR0]], ptr addrspace(1) [[GLOBAL_PTR1]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast ptr addrspace(1) %global.ptr0 to ptr
+ %cast1 = addrspacecast ptr addrspace(1) %global.ptr1 to ptr
+ call void (...) @llvm.fake.use(ptr %cast0, ptr %cast1)
+ ret void
+}
+
+define void @multiple_fake_use_one_inferrable(ptr %flat.ptr0, ptr addrspace(1) %global.ptr1) {
+; CHECK-LABEL: define void @multiple_fake_use_one_inferrable(
+; CHECK-SAME: ptr [[FLAT_PTR0:%.*]], ptr addrspace(1) [[GLOBAL_PTR1:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr [[FLAT_PTR0]], ptr addrspace(1) [[GLOBAL_PTR1]])
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GLOBAL_PTR1]], ptr [[FLAT_PTR0]])
+; CHECK-NEXT: ret void
+;
+ %cast1 = addrspacecast ptr addrspace(1) %global.ptr1 to ptr
+ call void (...) @llvm.fake.use(ptr %flat.ptr0, ptr %cast1)
+ call void (...) @llvm.fake.use(ptr %cast1, ptr %flat.ptr0)
+ ret void
+}
+
+define void @vector_of_pointers(<2 x ptr addrspace(1)> %global.ptr) {
+; CHECK-LABEL: define void @vector_of_pointers(
+; CHECK-SAME: <2 x ptr addrspace(1)> [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT: call void (...) @llvm.fake.use(<2 x ptr addrspace(1)> [[GLOBAL_PTR]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast <2 x ptr addrspace(1)> %global.ptr to <2 x ptr>
+ call void (...) @llvm.fake.use(<2 x ptr> %cast0)
+ ret void
+}
+
+define void @use_global_var() {
+; CHECK-LABEL: define void @use_global_var() {
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(3) @gv)
+; CHECK-NEXT: ret void
+;
+ call void (...) @llvm.fake.use(ptr addrspacecast (ptr addrspace(3) @gv to ptr))
+ ret void
+}
+
+define void @use_gep_cast(ptr addrspace(1) %global.ptr) {
+; CHECK-LABEL: define void @use_gep_cast(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr addrspace(1) [[GLOBAL_PTR]], i64 16
+; CHECK-NEXT: call void (...) @llvm.fake.use(ptr addrspace(1) [[GEP]], ptr addrspace(1) [[GLOBAL_PTR]])
+; CHECK-NEXT: ret void
+;
+ %cast0 = addrspacecast ptr addrspace(1) %global.ptr to ptr
+ %gep = getelementptr i8, ptr %cast0, i64 16
+ call void (...) @llvm.fake.use(ptr %gep, ptr %cast0)
+ ret void
+}
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Although llvm.fake.use is variadic, it's actually an error to have more than one argument (not enforced in the verifier — I'll add a patch for that); the variadic type is only used to allow it to be generic, since the expectation is that it can take any type (and we don't want a unique declaration for every type that may be used). The tests can be updated to remove the multiple_ cases, and the code can assume that there is only one operand. Aside from that, this LGTM, acknowledging that I have limited understanding of InferAddressSpaces (but I gave the relevant functions a read to make sure the updates make sense).
I do not think this is a reasonable restriction to have. It adds a new constraint that any IR pass has to consider. Any of the many flavors of CSE would need to deal with it, for example. If this wants to be a minimally intrusive intrinsic, it needs to have as few constraints as possible.
That might be fine — I'm not the original author, so I don't know the justification for the restriction when it was written, but it's likely simply that we never produce fake-use intrinsics with more than one argument, and so code that handles it uses the hardcoded assumption that there is one argument. Now that this intrinsic is part of upstream LLVM, allowing it to be used as a full variadic would be reasonable unless there are other technical constraints.
OK, I misread this. I was interpreting this as a single value cannot appear multiple times in the argument list. If the restriction is only one argument, that's a more reasonable restriction |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'm inclined to go ahead with this, including tests for multiple uses. In the absence of a verifier check this is valid IR that must be handled correctly.
I also still don't see a reason why fake.use would need to have this restriction
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't think there's a problem with landing this as-is; for full context, the current state of llvm.fake.use is that there is no verifier check to enforce having exactly one operand, but the documentation states that it is an error for it to have more than one operand, and SelectionDAG currently assumes a single operand only.
To my knowledge there's not a principled reason for this to be the case, it just simplified the implementation (particularly around ISel), so I'm fine to make the change for variadic fake uses to be properly supported in a future patch, or else if it turns out to cause problems then the verifier check can be added and the tests that depend on it being variadic can be adjusted or removed.
LLVM Buildbot has detected a new failure on a builder (builder name lost in page scrape). Full details are available at: https://lab.llvm.org/buildbot/#/builders/123/builds/7192. Here is the relevant piece of the build log for reference:
|
No description provided.