
Commit 3b2b7ec

[AMDGPU] Handle invariant marks in AMDGPUPromoteAllocaPass (#124607)
Fixes SWDEV-509327.
1 parent 5a81a55 commit 3b2b7ec
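
Before this change, the pass simply erased llvm.invariant.start/end and llvm.launder/strip.invariant.group calls when it promoted a private alloca to LDS (see the FIXME removed below); it now recreates each call against the new addrspace(3) pointer. A minimal illustrative sketch of the input side, assuming the alloca actually gets promoted: the kernel name here is made up, while the .p5/.p3 intrinsic manglings match the test added in this commit.

; Sketch only: a private alloca guarded by an invariant marker, as fed to
;   opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca
declare ptr @llvm.invariant.start.p5(i64, ptr addrspace(5) nocapture)

define amdgpu_kernel void @sketch_kernel() {
  %alloca = alloca i32, align 4, addrspace(5)
  ; Previously this call was dropped when %alloca became an LDS global; with
  ; this patch it is rebuilt roughly as
  ;   call ptr @llvm.invariant.start.p3(i64 4, ptr addrspace(3) <lds pointer>)
  %inv = call ptr @llvm.invariant.start.p5(i64 4, ptr addrspace(5) %alloca)
  ret void
}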

File tree

2 files changed: +101 -4 lines changed


llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp

Lines changed: 26 additions & 4 deletions
@@ -1556,12 +1556,34 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
     case Intrinsic::invariant_start:
     case Intrinsic::invariant_end:
     case Intrinsic::launder_invariant_group:
-    case Intrinsic::strip_invariant_group:
+    case Intrinsic::strip_invariant_group: {
+      SmallVector<Type *> ArgTy;
+      SmallVector<Value *> Args;
+      if (Intr->getIntrinsicID() == Intrinsic::invariant_start) {
+        Value *Size = Intr->getArgOperand(0);
+        ArgTy.emplace_back(Offset->getType());
+        Args.emplace_back(Size);
+        Args.emplace_back(Offset);
+      } else if (Intr->getIntrinsicID() == Intrinsic::invariant_end) {
+        Value *InvariantPtr = Intr->getArgOperand(0);
+        Value *Size = Intr->getArgOperand(1);
+        ArgTy.emplace_back(Offset->getType());
+        Args.emplace_back(InvariantPtr);
+        Args.emplace_back(Size);
+        Args.emplace_back(Offset);
+      } else {
+        ArgTy.emplace_back(Offset->getType());
+        Args.emplace_back(Offset);
+      }
+      Function *F = Intrinsic::getOrInsertDeclaration(
+          Intr->getModule(), Intr->getIntrinsicID(), ArgTy);
+      CallInst *NewIntr =
+          CallInst::Create(F, Args, Intr->getName(), Intr->getIterator());
+      Intr->mutateType(NewIntr->getType());
+      Intr->replaceAllUsesWith(NewIntr);
       Intr->eraseFromParent();
-      // FIXME: I think the invariant marker should still theoretically apply,
-      // but the intrinsics need to be changed to accept pointers with any
-      // address space.
       continue;
+    }
     case Intrinsic::objectsize: {
       Value *Src = Intr->getOperand(0);

Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca %s -o - | FileCheck %s
+
+declare ptr @llvm.invariant.start.p5(i64, ptr addrspace(5) nocapture)
+declare void @llvm.invariant.end.p5(ptr, i64, ptr addrspace(5) nocapture)
+declare ptr addrspace(5) @llvm.launder.invariant.group.p5(ptr addrspace(5))
+declare ptr addrspace(5) @llvm.strip.invariant.group.p5(ptr addrspace(5))
+
+define amdgpu_kernel void @use_invariant_start_and_end() {
+; CHECK-LABEL: define amdgpu_kernel void @use_invariant_start_and_end() {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call noalias nonnull dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !invariant.load [[META0:![0-9]+]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(4) [[TMP3]], align 4, !range [[RNG1:![0-9]+]], !invariant.load [[META0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = lshr i32 [[TMP2]], 16
+; CHECK-NEXT:    [[TMP6:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP7:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP8:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP9:%.*]] = mul nuw nsw i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = mul i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP4]]
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = add i32 [[TMP12]], [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr addrspace(3) @use_invariant_start_and_end.alloca, i32 0, i32 [[TMP13]]
+; CHECK-NEXT:    [[INVARIENT1:%.*]] = call ptr @llvm.invariant.start.p3(i64 0, ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT:    store <2 x i1> zeroinitializer, ptr [[INVARIENT1]], align 1
+; CHECK-NEXT:    call void @llvm.invariant.end.p3(ptr [[INVARIENT1]], i64 0, ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT:    ret void
+;
+bb:
+  %alloca = alloca i32, align 4, addrspace(5)
+  %invarient = call ptr @llvm.invariant.start.p5(i64 0, ptr addrspace(5) %alloca)
+  store <2 x i1> zeroinitializer, ptr %invarient, align 1
+  call void @llvm.invariant.end.p5(ptr %invarient, i64 0, ptr addrspace(5) %alloca)
+  ret void
+}
+
+define amdgpu_kernel void @use_invariant_group_and_strip() {
+; CHECK-LABEL: define amdgpu_kernel void @use_invariant_group_and_strip() {
+; CHECK-NEXT:  [[BB:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = call noalias nonnull dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !invariant.load [[META0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(4) [[TMP0]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspace(4) [[TMP3]], align 4, !range [[RNG1]], !invariant.load [[META0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = lshr i32 [[TMP2]], 16
+; CHECK-NEXT:    [[TMP6:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP7:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP8:%.*]] = call range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP9:%.*]] = mul nuw nsw i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = mul i32 [[TMP9]], [[TMP6]]
+; CHECK-NEXT:    [[TMP11:%.*]] = mul nuw nsw i32 [[TMP7]], [[TMP4]]
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP10]], [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = add i32 [[TMP12]], [[TMP8]]
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr addrspace(3) @use_invariant_group_and_strip.alloca, i32 0, i32 [[TMP13]]
+; CHECK-NEXT:    [[INVARIENT2:%.*]] = call ptr addrspace(3) @llvm.launder.invariant.group.p3(ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT:    store <2 x i1> zeroinitializer, ptr addrspace(3) [[INVARIENT2]], align 1
+; CHECK-NEXT:    [[STRIP1:%.*]] = call ptr addrspace(3) @llvm.strip.invariant.group.p3(ptr addrspace(3) [[TMP14]])
+; CHECK-NEXT:    store <2 x i1> zeroinitializer, ptr addrspace(3) [[STRIP1]], align 1
+; CHECK-NEXT:    ret void
+;
+bb:
+  %alloca = alloca i32, align 4, addrspace(5)
+  %invarient = call ptr addrspace(5) @llvm.launder.invariant.group.p5(ptr addrspace(5) %alloca)
+  store <2 x i1> zeroinitializer, ptr addrspace(5) %invarient, align 1
+  %strip = call ptr addrspace(5) @llvm.strip.invariant.group.p5(ptr addrspace(5) %alloca)
+  store <2 x i1> zeroinitializer, ptr addrspace(5) %strip, align 1
+  ret void
+}
+;.
+; CHECK: [[META0]] = !{}
+; CHECK: [[RNG1]] = !{i32 0, i32 1025}
+;.
