
Commit 02a184f

[AMDGPU][CodeGen] Update support (soffset + offset) s_buffer_load's
getBaseWithConstantOffset() is used for both scalar and non-scalar buffer loads. The difference between s_load and load instructions is that an s_load extends its 32-bit offset to 64 bits, so a 32-bit (address + offset) should not cause unsigned 32-bit integer wraparound, because the addition is performed in 64 bits.
1 parent 3a35ca0 commit 02a184f
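
For illustration, a minimal IR sketch of the case this change targets, based on the new test cases added below (the function names here are illustrative, not part of the commit). With nuw on the 32-bit add, the +77 can be folded into the immediate operand of the scalar buffer load; without the flag, the 32-bit add could wrap while the hardware adds in 64 bits, so the add is kept and the full sum is passed in soffset.

; Folding is allowed: the add carries nuw, so %i + 77 cannot wrap in 32 bits.
define amdgpu_cs void @fold_imm_offset_nuw(<4 x i32> inreg %rsrc, i32 inreg %i, ptr addrspace(1) inreg %out) {
  %off = add nuw i32 %i, 77
  %v = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %rsrc, i32 %off, i32 0)
  store i32 %v, ptr addrspace(1) %out, align 4
  ret void
}

; No flags: the +77 stays in a separate add and is not folded into the immediate.
define amdgpu_cs void @keep_add_noflags(<4 x i32> inreg %rsrc, i32 inreg %i, ptr addrspace(1) inreg %out) {
  %off = add i32 %i, 77
  %v = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %rsrc, i32 %off, i32 0)
  store i32 %v, ptr addrspace(1) %out, align 4
  ret void
}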

File tree

4 files changed: +81 -4 lines changed

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp
llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
llvm/test/CodeGen/AMDGPU/amdgcn-load-offset-from-reg.ll

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.cpp

Lines changed: 7 additions & 1 deletion
@@ -18,7 +18,7 @@ using namespace MIPatternMatch;
 
 std::pair<Register, unsigned>
 AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
-                                  GISelKnownBits *KnownBits) {
+                                  GISelKnownBits *KnownBits, bool CheckNUW) {
   MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
   if (Def->getOpcode() == TargetOpcode::G_CONSTANT) {
     unsigned Offset;
@@ -33,6 +33,12 @@ AMDGPU::getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
 
   int64_t Offset;
   if (Def->getOpcode() == TargetOpcode::G_ADD) {
+    // A 32-bit (address + offset) should not cause unsigned 32-bit integer
+    // wraparound, because s_load instructions perform the addition in 64 bits.
+    if (CheckNUW && !Def->getFlag(MachineInstr::NoUWrap)) {
+      assert(MRI.getType(Reg).getScalarSizeInBits() == 32);
+      return std::pair(Reg, 0);
+    }
     // TODO: Handle G_OR used for add case
     if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))
       return std::pair(Def->getOperand(1).getReg(), Offset);

llvm/lib/Target/AMDGPU/AMDGPUGlobalISelUtils.h

Lines changed: 2 additions & 1 deletion
@@ -25,7 +25,8 @@ namespace AMDGPU {
 /// Returns base register and constant offset.
 std::pair<Register, unsigned>
 getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg,
-                          GISelKnownBits *KnownBits = nullptr);
+                          GISelKnownBits *KnownBits = nullptr,
+                          bool CheckNUW = false);
 
 bool hasAtomicFaddRtnForTy(const GCNSubtarget &Subtarget, const LLT &Ty);
 }

llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp

Lines changed: 2 additions & 2 deletions
@@ -4997,8 +4997,8 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
   // an immediate offset.
   Register SOffset;
   unsigned Offset;
-  std::tie(SOffset, Offset) =
-      AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KB);
+  std::tie(SOffset, Offset) = AMDGPU::getBaseWithConstantOffset(
+      *MRI, Root.getReg(), KB, /*CheckNUW*/ true);
   if (!SOffset)
     return std::nullopt;

llvm/test/CodeGen/AMDGPU/amdgcn-load-offset-from-reg.ll

Lines changed: 70 additions & 0 deletions
@@ -109,6 +109,76 @@ define amdgpu_cs void @test_buffer_load_sgpr_plus_imm_offset(<4 x i32> inreg %base
   ret void
 }
 
+; GCN-LABEL: name: test_buffer_load_sgpr_plus_imm_offset_nuw
+; SDAG-DAG: %[[BASE0:.*]]:sgpr_32 = COPY $sgpr0
+; SDAG-DAG: %[[BASE1:.*]]:sgpr_32 = COPY $sgpr1
+; SDAG-DAG: %[[BASE2:.*]]:sgpr_32 = COPY $sgpr2
+; SDAG-DAG: %[[BASE3:.*]]:sgpr_32 = COPY $sgpr3
+; SDAG-DAG: %[[OFFSET:.*]]:sgpr_32 = COPY $sgpr4
+; SDAG-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; SDAG: S_BUFFER_LOAD_DWORD_SGPR_IMM killed %[[BASE]], %[[OFFSET]], 77,
+; GISEL-DAG: %[[BASE0:.*]]:sreg_32 = COPY $sgpr0
+; GISEL-DAG: %[[BASE1:.*]]:sreg_32 = COPY $sgpr1
+; GISEL-DAG: %[[BASE2:.*]]:sreg_32 = COPY $sgpr2
+; GISEL-DAG: %[[BASE3:.*]]:sreg_32 = COPY $sgpr3
+; GISEL-DAG: %[[OFFSET:.*]]:sreg_32 = COPY $sgpr4
+; GISEL-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; GISEL: S_BUFFER_LOAD_DWORD_SGPR_IMM %[[BASE]], %[[OFFSET]], 77,
+define amdgpu_cs void @test_buffer_load_sgpr_plus_imm_offset_nuw(<4 x i32> inreg %base, i32 inreg %i, ptr addrspace(1) inreg %out) #0 {
+  %off = add nuw i32 %i, 77
+  %v = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %base, i32 %off, i32 0)
+  store i32 %v, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; GCN-LABEL: name: test_buffer_load_sgpr_plus_imm_offset_nsw
+; SDAG-DAG: %[[BASE0:.*]]:sgpr_32 = COPY $sgpr0
+; SDAG-DAG: %[[BASE1:.*]]:sgpr_32 = COPY $sgpr1
+; SDAG-DAG: %[[BASE2:.*]]:sgpr_32 = COPY $sgpr2
+; SDAG-DAG: %[[BASE3:.*]]:sgpr_32 = COPY $sgpr3
+; SDAG-DAG: %[[OFFSET:.*]]:sgpr_32 = COPY $sgpr4
+; SDAG-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; SDAG-DAG: %[[ADD:.*]]:sreg_32 = nsw S_ADD_I32 %4, killed %11, implicit-def dead $scc
+; SDAG: S_BUFFER_LOAD_DWORD_SGPR_IMM killed %[[BASE]], killed %[[ADD]], 0,
+; GISEL-DAG: %[[BASE0:.*]]:sreg_32 = COPY $sgpr0
+; GISEL-DAG: %[[BASE1:.*]]:sreg_32 = COPY $sgpr1
+; GISEL-DAG: %[[BASE2:.*]]:sreg_32 = COPY $sgpr2
+; GISEL-DAG: %[[BASE3:.*]]:sreg_32 = COPY $sgpr3
+; GISEL-DAG: %[[OFFSET:.*]]:sreg_32 = COPY $sgpr4
+; GISEL-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; GISEL-DAG: %[[ADD:.*]]:sreg_32 = nsw S_ADD_I32 %1, %10, implicit-def dead $scc
+; GISEL: S_BUFFER_LOAD_DWORD_SGPR_IMM %[[BASE]], %[[ADD]], 0,
+define amdgpu_cs void @test_buffer_load_sgpr_plus_imm_offset_nsw(<4 x i32> inreg %base, i32 inreg %i, ptr addrspace(1) inreg %out) #0 {
+  %off = add nsw i32 %i, 77
+  %v = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %base, i32 %off, i32 0)
+  store i32 %v, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+; GCN-LABEL: name: test_buffer_load_sgpr_plus_imm_offset_noflags
+; SDAG-DAG: %[[BASE0:.*]]:sgpr_32 = COPY $sgpr0
+; SDAG-DAG: %[[BASE1:.*]]:sgpr_32 = COPY $sgpr1
+; SDAG-DAG: %[[BASE2:.*]]:sgpr_32 = COPY $sgpr2
+; SDAG-DAG: %[[BASE3:.*]]:sgpr_32 = COPY $sgpr3
+; SDAG-DAG: %[[OFFSET:.*]]:sgpr_32 = COPY $sgpr4
+; SDAG-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; SDAG-DAG: %[[ADD:.*]]:sreg_32 = S_ADD_I32 %4, killed %11, implicit-def dead $scc
+; SDAG: S_BUFFER_LOAD_DWORD_SGPR_IMM killed %[[BASE]], killed %[[ADD]], 0,
+; GISEL-DAG: %[[BASE0:.*]]:sreg_32 = COPY $sgpr0
+; GISEL-DAG: %[[BASE1:.*]]:sreg_32 = COPY $sgpr1
+; GISEL-DAG: %[[BASE2:.*]]:sreg_32 = COPY $sgpr2
+; GISEL-DAG: %[[BASE3:.*]]:sreg_32 = COPY $sgpr3
+; GISEL-DAG: %[[OFFSET:.*]]:sreg_32 = COPY $sgpr4
+; GISEL-DAG: %[[BASE:.*]]:sgpr_128 = REG_SEQUENCE %[[BASE0]], %subreg.sub0, %[[BASE1]], %subreg.sub1, %[[BASE2]], %subreg.sub2, %[[BASE3]], %subreg.sub3
+; GISEL-DAG: %[[ADD:.*]]:sreg_32 = S_ADD_I32 %1, %10, implicit-def dead $scc
+; GISEL: S_BUFFER_LOAD_DWORD_SGPR_IMM %[[BASE]], %[[ADD]], 0,
+define amdgpu_cs void @test_buffer_load_sgpr_plus_imm_offset_noflags(<4 x i32> inreg %base, i32 inreg %i, ptr addrspace(1) inreg %out) #0 {
+  %off = add i32 %i, 77
+  %v = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %base, i32 %off, i32 0)
+  store i32 %v, ptr addrspace(1) %out, align 4
+  ret void
+}
+
 ; GCN-LABEL: name: test_buffer_load_sgpr_or_imm_offset
 ; SDAG-DAG: %[[BASE0:.*]]:sgpr_32 = COPY $sgpr0
 ; SDAG-DAG: %[[BASE1:.*]]:sgpr_32 = COPY $sgpr1
