@@ -115,61 +115,68 @@ class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
- // load vd, (rs1)
+ // unit-stride load vd, (rs1), vm
+ class VUnitStrideLoad<RISCVWidth width, string opcodestr>
+ : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
+ (outs VR:$vd),
+ (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;
+
+ let vm = 1, RVVConstraint = NoConstraint in {
+ // unit-stride whole register load vl<nf>r.v vd, (rs1)
+ class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
+ : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
+ width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
+ opcodestr, "$vd, (${rs1})"> {
+ let Uses = [];
+ }
+
+ // unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
: RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
(outs VR:$vd),
- (ins GPR:$rs1), opcodestr, "$vd, (${rs1})"> {
- let vm = 1;
- let RVVConstraint = NoConstraint;
- }
+ (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
+ } // vm = 1, RVVConstraint = NoConstraint

- // load vd, (rs1), vm
- class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
- string opcodestr>
- : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
+ // unit-stride fault-only-first load vd, (rs1), vm
+ class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
+ : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

- // load vd, (rs1), rs2, vm
+ // strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
: RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
"$vd, (${rs1}), $rs2$vm">;

- // load vd, (rs1), vs2, vm
+ // indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
: RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
"$vd, (${rs1}), $vs2$vm">;

- // vl<nf>r.v vd, (rs1)
- class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
- : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
- width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
- opcodestr, "$vd, (${rs1})"> {
- let vm = 1;
- let Uses = [];
- let RVVConstraint = NoConstraint;
- }
+ // unit-stride segment load vd, (rs1), vm
+ class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
+ : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
+ (outs VR:$vd),
+ (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

- // segment load vd, (rs1), vm
- class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
- RISCVWidth width, string opcodestr>
- : RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
+ // segment fault-only-first load vd, (rs1), vm
+ class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
+ : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

- // segment load vd, (rs1), rs2, vm
+ // strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
: RVInstVLS<nf, width.Value{3}, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
"$vd, (${rs1}), $rs2$vm">;

- // segment load vd, (rs1), vs2, vm
+ // indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
string opcodestr>
: RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
@@ -179,42 +186,40 @@ class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
- // store vd, vs3, (rs1)
+ // unit-stride store vd, vs3, (rs1), vm
+ class VUnitStrideStore<RISCVWidth width, string opcodestr>
+ : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
+ (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
+ "$vs3, (${rs1})$vm">;
+
+ let vm = 1 in {
+ // vs<nf>r.v vd, (rs1)
+ class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
+ : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
+ 0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
+ opcodestr, "$vs3, (${rs1})"> {
+ let Uses = [];
+ }
+
+ // unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
: RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
(outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
- "$vs3, (${rs1})"> {
- let vm = 1;
- }
+ "$vs3, (${rs1})">;
+ } // vm = 1

- // store vd, vs3, (rs1), vm
- class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
- string opcodestr>
- : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
- (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
- "$vs3, (${rs1})$vm">;
-
- // store vd, vs3, (rs1), rs2, vm
+ // strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
: RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
(ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
opcodestr, "$vs3, (${rs1}), $rs2$vm">;

- // store vd, vs3, (rs1), vs2, vm
+ // indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
: RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
(ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
opcodestr, "$vs3, (${rs1}), $vs2$vm">;

- // vs<nf>r.v vd, (rs1)
- class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
- : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
- 0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
- opcodestr, "$vs3, (${rs1})"> {
- let vm = 1;
- let Uses = [];
- }
-
// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
: RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
@@ -840,22 +845,23 @@ def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

// Vector Unit-Stride Instructions
- def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">,
+ def VLE8_V : VUnitStrideLoad<LSWidth8, "vle8.v">,
VLESched<8>;
- def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">,
+ def VLE16_V : VUnitStrideLoad<LSWidth16, "vle16.v">,
VLESched<16>;
- def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">,
+ def VLE32_V : VUnitStrideLoad<LSWidth32, "vle32.v">,
VLESched<32>;
- def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">,
+ def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
VLESched<64>;

- def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">,
+ // Vector Unit-Stride Fault-only-First Loads
+ def VLE8FF_V : VUnitStrideLoadFF<LSWidth8, "vle8ff.v">,
VLFSched<8>;
- def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">,
+ def VLE16FF_V : VUnitStrideLoadFF< LSWidth16, "vle16ff.v">,
VLFSched<16>;
- def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">,
+ def VLE32FF_V : VUnitStrideLoadFF< LSWidth32, "vle32ff.v">,
VLFSched<32>;
- def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">,
+ def VLE64FF_V : VUnitStrideLoadFF< LSWidth64, "vle64ff.v">,
VLFSched<64>;

def VLM_V : VUnitStrideLoadMask<"vlm.v">,
@@ -867,13 +873,13 @@ def : InstAlias<"vle1.v $vd, (${rs1})",
def : InstAlias<"vse1.v $vs3, (${rs1})",
(VSM_V VR:$vs3, GPR:$rs1), 0>;

- def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">,
+ def VSE8_V : VUnitStrideStore<LSWidth8, "vse8.v">,
VSESched<8>;
- def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">,
+ def VSE16_V : VUnitStrideStore<LSWidth16, "vse16.v">,
VSESched<16>;
- def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">,
+ def VSE32_V : VUnitStrideStore<LSWidth32, "vse32.v">,
VSESched<32>;
- def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">,
+ def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
VSESched<64>;

// Vector Strided Instructions
@@ -1501,15 +1507,15 @@ foreach n = [1, 2, 4, 8] in {

let Predicates = [HasStdExtZvlsseg] in {
foreach nf=2-8 in {
- def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
- def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
- def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
- def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;
-
- def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad <!add(nf, -1), LUMOPUnitStrideFF , LSWidth8, "vlseg"#nf#"e8ff.v">;
- def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad <!add(nf, -1), LUMOPUnitStrideFF , LSWidth16, "vlseg"#nf#"e16ff.v">;
- def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad <!add(nf, -1), LUMOPUnitStrideFF , LSWidth32, "vlseg"#nf#"e32ff.v">;
- def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad <!add(nf, -1), LUMOPUnitStrideFF , LSWidth64, "vlseg"#nf#"e64ff.v">;
+ def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth8, "vlseg"#nf#"e8.v">;
+ def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth16, "vlseg"#nf#"e16.v">;
+ def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth32, "vlseg"#nf#"e32.v">;
+ def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">;
+
+ def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoadFF <!add(nf, -1), LSWidth8, "vlseg"#nf#"e8ff.v">;
+ def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoadFF <!add(nf, -1), LSWidth16, "vlseg"#nf#"e16ff.v">;
+ def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoadFF <!add(nf, -1), LSWidth32, "vlseg"#nf#"e32ff.v">;
+ def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoadFF <!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">;

def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
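
For reference, a rough sketch (not part of the patch) of the assembler forms the refactored classes above describe, following the opcodestr/operand strings in the diff; the register and base-address choices are arbitrary examples:

vle8.v     v8, (a0), v0.t      # VUnitStrideLoad: unit-stride load, masked form
vle8ff.v   v8, (a0), v0.t      # VUnitStrideLoadFF: fault-only-first variant
vlseg2e8.v v8, (a0), v0.t      # VUnitStrideSegmentLoad with nf = 2
vl1r.v     v8, (a0)            # VWholeLoad: whole register load, always unmasked (vm = 1)
vse8.v     v8, (a0), v0.t      # VUnitStrideStore: unit-stride store
vs1r.v     v8, (a0)            # VWholeStore: whole register store, always unmasked (vm = 1)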