Skip to content

Commit d488f1f

Browse files
author
Bin Cheng
committed
[RISCV][NFC]: Refactor classes for load/store instructions of RVV
This patch refactors classes for load/store of V extension by:
- Introduce new classes for VUnitStrideLoadFF and VUnitStrideSegmentLoadFF so
  that uses of L/SUMOP* are not spread around different places.
- Reorder classes for Unit-Stride load/store in line with the table describing
  lumop/sumop in riscv-v-spec.pdf.

Reviewed By: HsiangKai, craig.topper

Differential Revision: https://reviews.llvm.org/D109318
1 parent 87e53a0 commit d488f1f

File tree

1 file changed

+75
-69
lines changed

1 file changed

+75
-69
lines changed

llvm/lib/Target/RISCV/RISCVInstrInfoV.td

Lines changed: 75 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -115,61 +115,68 @@ class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
115115
//===----------------------------------------------------------------------===//
116116

117117
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
118-
// load vd, (rs1)
118+
// unit-stride load vd, (rs1), vm
119+
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
120+
: RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
121+
(outs VR:$vd),
122+
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;
123+
124+
let vm = 1, RVVConstraint = NoConstraint in {
125+
// unit-stride whole register load vl<nf>r.v vd, (rs1)
126+
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
127+
: RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
128+
width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
129+
opcodestr, "$vd, (${rs1})"> {
130+
let Uses = [];
131+
}
132+
133+
// unit-stride mask load vd, (rs1)
119134
class VUnitStrideLoadMask<string opcodestr>
120135
: RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
121136
(outs VR:$vd),
122-
(ins GPR:$rs1), opcodestr, "$vd, (${rs1})"> {
123-
let vm = 1;
124-
let RVVConstraint = NoConstraint;
125-
}
137+
(ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
138+
} // vm = 1, RVVConstraint = NoConstraint
126139

127-
// load vd, (rs1), vm
128-
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
129-
string opcodestr>
130-
: RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
140+
// unit-stride fault-only-first load vd, (rs1), vm
141+
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
142+
: RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
131143
(outs VR:$vd),
132144
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;
133145

134-
// load vd, (rs1), rs2, vm
146+
// strided load vd, (rs1), rs2, vm
135147
class VStridedLoad<RISCVWidth width, string opcodestr>
136148
: RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
137149
(outs VR:$vd),
138150
(ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
139151
"$vd, (${rs1}), $rs2$vm">;
140152

141-
// load vd, (rs1), vs2, vm
153+
// indexed load vd, (rs1), vs2, vm
142154
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
143155
: RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
144156
(outs VR:$vd),
145157
(ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
146158
"$vd, (${rs1}), $vs2$vm">;
147159

148-
// vl<nf>r.v vd, (rs1)
149-
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
150-
: RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
151-
width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
152-
opcodestr, "$vd, (${rs1})"> {
153-
let vm = 1;
154-
let Uses = [];
155-
let RVVConstraint = NoConstraint;
156-
}
160+
// unit-stride segment load vd, (rs1), vm
161+
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
162+
: RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
163+
(outs VR:$vd),
164+
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;
157165

158-
// segment load vd, (rs1), vm
159-
class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
160-
RISCVWidth width, string opcodestr>
161-
: RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
166+
// segment fault-only-first load vd, (rs1), vm
167+
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
168+
: RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
162169
(outs VR:$vd),
163170
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;
164171

165-
// segment load vd, (rs1), rs2, vm
172+
// strided segment load vd, (rs1), rs2, vm
166173
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
167174
: RVInstVLS<nf, width.Value{3}, width.Value{2-0},
168175
(outs VR:$vd),
169176
(ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
170177
"$vd, (${rs1}), $rs2$vm">;
171178

172-
// segment load vd, (rs1), vs2, vm
179+
// indexed segment load vd, (rs1), vs2, vm
173180
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
174181
string opcodestr>
175182
: RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
@@ -179,42 +186,40 @@ class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
179186
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
180187

181188
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
182-
// store vd, vs3, (rs1)
189+
// unit-stride store vd, vs3, (rs1), vm
190+
class VUnitStrideStore<RISCVWidth width, string opcodestr>
191+
: RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
192+
(outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
193+
"$vs3, (${rs1})$vm">;
194+
195+
let vm = 1 in {
196+
// vs<nf>r.v vd, (rs1)
197+
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
198+
: RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
199+
0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
200+
opcodestr, "$vs3, (${rs1})"> {
201+
let Uses = [];
202+
}
203+
204+
// unit-stride mask store vd, vs3, (rs1)
183205
class VUnitStrideStoreMask<string opcodestr>
184206
: RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
185207
(outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
186-
"$vs3, (${rs1})"> {
187-
let vm = 1;
188-
}
208+
"$vs3, (${rs1})">;
209+
} // vm = 1
189210

190-
// store vd, vs3, (rs1), vm
191-
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
192-
string opcodestr>
193-
: RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
194-
(outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
195-
"$vs3, (${rs1})$vm">;
196-
197-
// store vd, vs3, (rs1), rs2, vm
211+
// strided store vd, vs3, (rs1), rs2, vm
198212
class VStridedStore<RISCVWidth width, string opcodestr>
199213
: RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
200214
(ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
201215
opcodestr, "$vs3, (${rs1}), $rs2$vm">;
202216

203-
// store vd, vs3, (rs1), vs2, vm
217+
// indexed store vd, vs3, (rs1), vs2, vm
204218
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
205219
: RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
206220
(ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
207221
opcodestr, "$vs3, (${rs1}), $vs2$vm">;
208222

209-
// vs<nf>r.v vd, (rs1)
210-
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
211-
: RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
212-
0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
213-
opcodestr, "$vs3, (${rs1})"> {
214-
let vm = 1;
215-
let Uses = [];
216-
}
217-
218223
// segment store vd, vs3, (rs1), vm
219224
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
220225
: RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
@@ -840,22 +845,23 @@ def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
840845
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
841846

842847
// Vector Unit-Stride Instructions
843-
def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">,
848+
def VLE8_V : VUnitStrideLoad<LSWidth8, "vle8.v">,
844849
VLESched<8>;
845-
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">,
850+
def VLE16_V : VUnitStrideLoad<LSWidth16, "vle16.v">,
846851
VLESched<16>;
847-
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">,
852+
def VLE32_V : VUnitStrideLoad<LSWidth32, "vle32.v">,
848853
VLESched<32>;
849-
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">,
854+
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
850855
VLESched<64>;
851856

852-
def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">,
857+
// Vector Unit-Stride Fault-only-First Loads
858+
def VLE8FF_V : VUnitStrideLoadFF<LSWidth8, "vle8ff.v">,
853859
VLFSched<8>;
854-
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">,
860+
def VLE16FF_V : VUnitStrideLoadFF<LSWidth16, "vle16ff.v">,
855861
VLFSched<16>;
856-
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">,
862+
def VLE32FF_V : VUnitStrideLoadFF<LSWidth32, "vle32ff.v">,
857863
VLFSched<32>;
858-
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">,
864+
def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
859865
VLFSched<64>;
860866

861867
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
@@ -867,13 +873,13 @@ def : InstAlias<"vle1.v $vd, (${rs1})",
867873
def : InstAlias<"vse1.v $vs3, (${rs1})",
868874
(VSM_V VR:$vs3, GPR:$rs1), 0>;
869875

870-
def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">,
876+
def VSE8_V : VUnitStrideStore<LSWidth8, "vse8.v">,
871877
VSESched<8>;
872-
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">,
878+
def VSE16_V : VUnitStrideStore<LSWidth16, "vse16.v">,
873879
VSESched<16>;
874-
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">,
880+
def VSE32_V : VUnitStrideStore<LSWidth32, "vse32.v">,
875881
VSESched<32>;
876-
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">,
882+
def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
877883
VSESched<64>;
878884

879885
// Vector Strided Instructions
@@ -1501,15 +1507,15 @@ foreach n = [1, 2, 4, 8] in {
15011507

15021508
let Predicates = [HasStdExtZvlsseg] in {
15031509
foreach nf=2-8 in {
1504-
def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
1505-
def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
1506-
def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
1507-
def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;
1508-
1509-
def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
1510-
def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
1511-
def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
1512-
def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;
1510+
def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth8, "vlseg"#nf#"e8.v">;
1511+
def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth16, "vlseg"#nf#"e16.v">;
1512+
def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth32, "vlseg"#nf#"e32.v">;
1513+
def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">;
1514+
1515+
def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth8, "vlseg"#nf#"e8ff.v">;
1516+
def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth16, "vlseg"#nf#"e16ff.v">;
1517+
def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth32, "vlseg"#nf#"e32ff.v">;
1518+
def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">;
15131519

15141520
def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
15151521
def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;

0 commit comments

Comments (0)