Commit 9ac52ce

[AMDGPU] Add iglp_opt(3) for simple mfma / exp interleaving (llvm#117269)
Adds a minimal iglp_opt to do simple exp / mfma interleaving.
1 parent: 0ee364d

File tree: 5 files changed, +220 -4 lines

llvm/docs/AMDGPUUsage.rst

Lines changed: 2 additions & 0 deletions
@@ -1376,6 +1376,8 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
 
     0. Interleave DS and MFMA instructions for small GEMM kernels.
     1. Interleave DS and MFMA instructions for single wave small GEMM kernels.
+    2. Interleave TRANS and MFMA instructions, as well as their VALU and DS predecessors, for attention kernels.
+    3. Interleave TRANS and MFMA instructions, with no predecessor interleaving, for attention kernels.
 
   Only one iglp_opt intrinsic may be used in a scheduling region. The iglp_opt intrinsic
   cannot be combined with sched_barrier or sched_group_barrier.
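
For reference, the new value is requested from IR through the existing llvm.amdgcn.iglp.opt intrinsic with an immediate argument of 3. A minimal sketch, condensed from the new test added below (the kernel and value names here are illustrative, not part of the commit):

; Request the simple TRANS/MFMA interleave (iglp_opt value 3) for this
; scheduling region; only one iglp_opt call is allowed per region.
declare <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float, float, <4 x float>, i32 immarg, i32 immarg, i32 immarg)
declare float @llvm.exp.f32(float)
declare void @llvm.amdgcn.iglp.opt(i32 immarg)

define amdgpu_kernel void @simple_interleave(ptr addrspace(1) %out0, ptr addrspace(1) %out1, float %x, <4 x float> %acc) {
  %mai = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %acc, i32 0, i32 0, i32 0)
  %e = call float @llvm.exp.f32(float %x)
  store <4 x float> %mai, ptr addrspace(1) %out0
  store float %e, ptr addrspace(1) %out1
  tail call void @llvm.amdgcn.iglp.opt(i32 3)
  ret void
}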

llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp

Lines changed: 47 additions & 2 deletions
@@ -832,7 +832,8 @@ void PipelineSolver::solve() {
 enum IGLPStrategyID : int {
   MFMASmallGemmOptID = 0,
   MFMASmallGemmSingleWaveOptID = 1,
-  MFMAExpInterleave = 2
+  MFMAExpInterleaveID = 2,
+  MFMAExpSimpleInterleaveID = 3
 };
 
 // Implement a IGLP scheduling strategy.
@@ -1845,6 +1846,48 @@ bool MFMAExpInterleaveOpt::applyIGLPStrategy(
   return true;
 }
 
+class MFMAExpSimpleInterleaveOpt final : public IGLPStrategy {
+public:
+  bool applyIGLPStrategy(
+      DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
+      DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
+      AMDGPU::SchedulingPhase Phase) override;
+
+  bool shouldApplyStrategy(ScheduleDAGInstrs *DAG,
+                           AMDGPU::SchedulingPhase Phase) override {
+    return true;
+  }
+
+  MFMAExpSimpleInterleaveOpt(ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
+      : IGLPStrategy(DAG, TII) {
+    IsBottomUp = true;
+  }
+};
+
+bool MFMAExpSimpleInterleaveOpt::applyIGLPStrategy(
+    DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
+    DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
+    AMDGPU::SchedulingPhase Phase) {
+  // Count the number of MFMA instructions.
+  unsigned MFMACount = 0;
+  for (const MachineInstr &I : *DAG)
+    if (TII->isMFMAorWMMA(I))
+      ++MFMACount;
+
+  const unsigned PipelineSyncID = 0;
+  for (unsigned I = 0; I < MFMACount * 3; ++I) {
+    SchedGroup *SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
+        SchedGroupMask::TRANS, 1, PipelineSyncID, DAG, TII);
+    SG->initSchedGroup(SyncedInstrs[SG->getSyncID()]);
+
+    SG = &SyncedSchedGroups[PipelineSyncID].emplace_back(
+        SchedGroupMask::MFMA, 1, PipelineSyncID, DAG, TII);
+    SG->initSchedGroup(SyncedInstrs[SG->getSyncID()]);
+  }
+
+  return true;
+}
+
 class MFMASmallGemmSingleWaveOpt final : public IGLPStrategy {
 private:
   // Whether the DS_READ is a predecessor of first four MFMA in region
@@ -2308,8 +2351,10 @@ createIGLPStrategy(IGLPStrategyID ID, ScheduleDAGInstrs *DAG,
     return std::make_unique<MFMASmallGemmOpt>(DAG, TII);
   case MFMASmallGemmSingleWaveOptID:
     return std::make_unique<MFMASmallGemmSingleWaveOpt>(DAG, TII);
-  case MFMAExpInterleave:
+  case MFMAExpInterleaveID:
     return std::make_unique<MFMAExpInterleaveOpt>(DAG, TII);
+  case MFMAExpSimpleInterleaveID:
+    return std::make_unique<MFMAExpSimpleInterleaveOpt>(DAG, TII);
   }
 
   llvm_unreachable("Unknown IGLPStrategyID");
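
In effect, the new strategy emits MFMACount * 3 repetitions of a one-TRANS / one-MFMA scheduling-group pair, filled bottom-up. For intuition only, a hand-written pipeline built from sched_group_barrier that approximates the same shape might look like the sketch below (assuming the mask values documented in AMDGPUUsage.rst, 0x8 for MFMA/WMMA and 0x400 for TRANS; in practice iglp_opt(3) builds the groups itself and, as noted above, cannot be combined with sched_group_barrier in the same region):

; Approximation of the generated pipeline: alternating groups of one TRANS
; instruction and one MFMA instruction, repeated across the region.
declare void @llvm.amdgcn.sched.group.barrier(i32 immarg, i32 immarg, i32 immarg)

define void @interleave_pipeline_sketch() {
  call void @llvm.amdgcn.sched.group.barrier(i32 1024, i32 1, i32 0) ; 0x400 = TRANS, 1 instruction, sync id 0
  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)    ; 0x8 = MFMA/WMMA, 1 instruction, sync id 0
  ; ...repeat the pair as many times as there are TRANS/MFMA ops to interleave
  ret void
}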

llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.large.mir

Lines changed: 1 addition & 0 deletions
@@ -1146,6 +1146,7 @@
 ; GCN-NEXT: s_waitcnt vmcnt(8)
 ; GCN-NEXT: ;;#ASMEND
 ; GCN-NEXT: s_endpgm
+
 attributes #0 = {"amdgpu-flat-work-group-size"="256,256"}
 !0 = !{i64 2862105}

Lines changed: 170 additions & 0 deletions (new test file)
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -o - %s | FileCheck -check-prefix=GCN %s
+
+define amdgpu_kernel void @MFMAExpInterleave(ptr addrspace(1) %out0, ptr addrspace(1) %out1, float %in0, <4 x float> %in1) {
+; GCN-LABEL: MFMAExpInterleave:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_load_dword s6, s[4:5], 0x10
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
+; GCN-NEXT: v_mov_b32_e32 v1, 0x3fb8aa3b
+; GCN-NEXT: v_mov_b32_e32 v0, 1.0
+; GCN-NEXT: s_mov_b32 s7, 0x42b17218
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v2, s6, v1
+; GCN-NEXT: v_rndne_f32_e32 v3, v2
+; GCN-NEXT: v_sub_f32_e32 v4, v2, v3
+; GCN-NEXT: v_fma_f32 v1, s6, v1, -v2
+; GCN-NEXT: v_mov_b32_e32 v2, 0x32a5705f
+; GCN-NEXT: v_accvgpr_write_b32 a0, s0
+; GCN-NEXT: v_fmac_f32_e32 v1, s6, v2
+; GCN-NEXT: v_accvgpr_write_b32 a1, s1
+; GCN-NEXT: v_accvgpr_write_b32 a2, s2
+; GCN-NEXT: v_accvgpr_write_b32 a3, s3
+; GCN-NEXT: v_add_f32_e32 v1, v4, v1
+; GCN-NEXT: v_cvt_i32_f32_e32 v2, v3
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_exp_f32_e32 v1, v1
+; GCN-NEXT: s_mov_b32 s0, 0x3fb8aa3b
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: ; iglp_opt mask(0x00000003)
+; GCN-NEXT: v_ldexp_f32 v1, v1, v2
+; GCN-NEXT: v_mov_b32_e32 v2, 0xc2ce8ed0
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v2
+; GCN-NEXT: v_mov_b32_e32 v2, 0x42b17218
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v2
+; GCN-NEXT: v_mov_b32_e32 v2, 0x7f800000
+; GCN-NEXT: s_mov_b32 s6, 0xc2ce8ed0
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_ldexp_f32 v3, v3, v4
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_ldexp_f32 v3, v3, v4
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_ldexp_f32 v3, v3, v4
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_ldexp_f32 v3, v3, v4
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_ldexp_f32 v3, v3, v4
+; GCN-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_mul_f32_e32 v3, 0x3fb8aa3b, v1
+; GCN-NEXT: v_fma_f32 v4, v1, s0, -v3
+; GCN-NEXT: v_rndne_f32_e32 v5, v3
+; GCN-NEXT: v_fmac_f32_e32 v4, 0x32a5705f, v1
+; GCN-NEXT: v_sub_f32_e32 v3, v3, v5
+; GCN-NEXT: v_add_f32_e32 v3, v3, v4
+; GCN-NEXT: v_exp_f32_e32 v3, v3
+; GCN-NEXT: v_cvt_i32_f32_e32 v4, v5
+; GCN-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v0, a[0:3]
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v1
+; GCN-NEXT: v_ldexp_f32 v0, v3, v4
+; GCN-NEXT: s_nop 0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v1
+; GCN-NEXT: s_nop 1
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GCN-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v0
+; GCN-NEXT: v_fma_f32 v3, v0, s0, -v1
+; GCN-NEXT: v_rndne_f32_e32 v4, v1
+; GCN-NEXT: v_fmac_f32_e32 v3, 0x32a5705f, v0
+; GCN-NEXT: v_sub_f32_e32 v1, v1, v4
+; GCN-NEXT: v_add_f32_e32 v1, v1, v3
+; GCN-NEXT: v_exp_f32_e32 v1, v1
+; GCN-NEXT: v_cvt_i32_f32_e32 v3, v4
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v0
+; GCN-NEXT: v_mov_b32_e32 v4, 0
+; GCN-NEXT: v_ldexp_f32 v1, v1, v3
+; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: global_store_dwordx4 v4, a[0:3], s[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT: global_store_dword v4, v0, s[2:3]
+; GCN-NEXT: s_endpgm
+  %mai0 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %in1, i32 0, i32 0, i32 0)
+  %mai1 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai0, i32 0, i32 0, i32 0)
+  %mai2 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai1, i32 0, i32 0, i32 0)
+  %mai3 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai2, i32 0, i32 0, i32 0)
+  %mai4 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai3, i32 0, i32 0, i32 0)
+  %mai5 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai4, i32 0, i32 0, i32 0)
+  %mai6 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai5, i32 0, i32 0, i32 0)
+  %mai7 = tail call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float 1.0, float 1.0, <4 x float> %mai6, i32 0, i32 0, i32 0)
+  %exp0 = call float @llvm.exp.f32(float %in0)
+  %exp1 = call float @llvm.exp.f32(float %exp0)
+  %exp2 = call float @llvm.exp.f32(float %exp1)
+  %exp3 = call float @llvm.exp.f32(float %exp2)
+  %exp4 = call float @llvm.exp.f32(float %exp3)
+  %exp5 = call float @llvm.exp.f32(float %exp4)
+  %exp6 = call float @llvm.exp.f32(float %exp5)
+  %exp7 = call float @llvm.exp.f32(float %exp6)
+  store <4 x float> %mai7, ptr addrspace(1) %out0
+  store float %exp7, ptr addrspace(1) %out1
+  tail call void @llvm.amdgcn.iglp.opt(i32 3)
+  ret void
+}

llvm/test/CodeGen/AMDGPU/llvm.amdgcn.iglp.opt.exp.small.mir

Lines changed: 0 additions & 2 deletions
@@ -492,7 +492,6 @@
 attributes #0 = {"amdgpu-flat-work-group-size"="256,256"}
 
 !0 = !{i64 2862105}
-
 ...
 
 ---
@@ -899,4 +898,3 @@ body: |
     S_ENDPGM 0
 ...
 
-
