
Commit 15ce0ab

[RISCV] Refine vector load/store tablegen pattern, NFC.
Refine tablegen pattern for vector load/store, and follow D93012 to separate
masked and unmasked definitions for pseudo load/store instructions.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93284
1 parent af7ef89 commit 15ce0ab
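For context, the diff below replaces the hand-rolled per-LMUL foreach with dedicated pseudo classes and multiclasses. A minimal sketch of how one instantiation expands (the EEW/LMUL combination and register class shown are illustrative assumptions, not an exhaustive listing):

// Sketch only: "defm PseudoVLE8 : VPseudoUSLoad;" from the foreach over EEWList
// produces one unmasked and one masked pseudo per LMUL, roughly:
def PseudoVLE8_V_M1      : VPseudoUSLoadNoMask<VR>;  // operands: (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew)
def PseudoVLE8_V_M1_MASK : VPseudoUSLoadMask<VR>;    // adds $merge and VMaskOp:$vm, with "$rd = $merge"

The SDNode patterns then select the unmasked pseudos directly, which is why the IMPLICIT_DEF merge operand and the dummy mask operand disappear from the test checks updated below.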

3 files changed, +144 -105 lines changed


llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 121 additions & 77 deletions
@@ -277,6 +277,68 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
   let VLMul = m.value;
 }
 
+class VPseudoUSLoadNoMask<VReg RetClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadMask<VReg RetClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$merge,
+                  GPR:$rs1,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreNoMask<VReg StClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 2;
+  let SEWIndex = 3;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSStoreMask<VReg StClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
@@ -342,6 +404,28 @@ class VPseudoBinaryCarryIn<VReg RetClass,
   let VLMul = MInfo.value;
 }
 
+multiclass VPseudoUSLoad {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSLoadNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask<vreg>;
+    }
+  }
+}
+
+multiclass VPseudoUSStore {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoUSStoreNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
@@ -519,8 +603,32 @@ multiclass VPseudoBinaryV_WV_WX_WI {
 }
 
 //===----------------------------------------------------------------------===//
-// Helpers to define the different patterns.
+// Helpers to define the SDNode patterns.
 //===----------------------------------------------------------------------===//
+
+multiclass VPatUSLoadStoreSDNode<LLVMType type,
+                                 LLVMType mask_type,
+                                 int sew,
+                                 LMULInfo vlmul,
+                                 RegisterClass reg_rs1,
+                                 VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX);
+  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX);
+  // Load
+  def : Pat<(type (load reg_rs1:$rs1)),
+            (load_instr reg_rs1:$rs1, VLMax, sew)>;
+  // Store
+  def : Pat<(store type:$rs2, reg_rs1:$rs1),
+            (store_instr reg_class:$rs2, reg_rs1:$rs1, VLMax, sew)>;
+}
+
+multiclass VPatUSLoadStoreSDNodes<RegisterClass reg_rs1> {
+  foreach vti = AllVectors in
+    defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.Mask, vti.SEW, vti.LMul,
+                                    reg_rs1, vti.RegClass>;
+}
+
 class VPatBinarySDNode<SDNode vop,
                        string instruction_name,
                        ValueType result_type,
@@ -546,6 +654,9 @@ multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
                         vti.LMul, vti.RegClass, vti.RegClass>;
 }
 
+//===----------------------------------------------------------------------===//
+// Helpers to define the intrinsic patterns.
+//===----------------------------------------------------------------------===//
 class VPatBinaryNoMask<string intrinsic_name,
                        string inst,
                        string kind,
@@ -922,83 +1033,10 @@ def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>
 // 7. Vector Loads and Stores
 //===----------------------------------------------------------------------===//
 
-// Pseudos.
+// Pseudos Unit-Stride Loads and Stores
 foreach eew = EEWList in {
-  foreach lmul = MxList.m in {
-    defvar LInfo = lmul.MX;
-    defvar vreg = lmul.vrclass;
-    defvar vlmul = lmul.value;
-    defvar constraint = "$rd = $merge";
-
-    let mayLoad = 1, mayStore = 0, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        VLMul = vlmul in
-    {
-      let Uses = [VL, VTYPE], VLIndex = 4, SEWIndex = 5, MergeOpIndex = 1,
-          Constraints = constraint,
-          BaseInstr = !cast<Instruction>("VLE" # eew # "_V") in
-        def "PseudoVLE" # eew # "_V_" # LInfo
-          : Pseudo<(outs vreg:$rd),
-                   (ins vreg:$merge, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                        ixlenimm:$sew),
-                   []>,
-            RISCVVPseudo;
-    }
-
-    let mayLoad = 0, mayStore = 1, hasSideEffects = 0,
-        usesCustomInserter = 1,
-        VLMul = vlmul in
-    {
-      // Masked stores do not have a merge operand as merge is done in memory
-      let Uses = [VL, VTYPE],
-          VLIndex = 3, SEWIndex = 4, MergeOpIndex = -1,
-          BaseInstr = !cast<Instruction>("VSE" # eew # "_V") in
-        def "PseudoVSE" # eew # "_V_" # LInfo
-          : Pseudo<(outs),
-                   (ins vreg:$rd, GPR:$rs1, VMaskOp:$mask, GPR:$vl,
-                        ixlenimm:$sew),
-                   []>,
-            RISCVVPseudo;
-    }
-  }
-}
-
-// Patterns.
-multiclass pat_load_store<LLVMType type,
-                          LLVMType mask_type,
-                          int sew,
-                          LMULInfo vlmul,
-                          VReg reg_class>
-{
-  defvar load_instr = !cast<Instruction>("PseudoVLE" # sew # "_V_"# vlmul.MX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE" # sew # "_V_"# vlmul.MX);
-  // Load
-  def : Pat<(type (load GPR:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)),
-             GPR:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-  def : Pat<(type (load AddrFI:$rs1)),
-            (load_instr (type (IMPLICIT_DEF)),
-             AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-
-  // Store
-  def : Pat<(store type:$rs2, GPR:$rs1),
-            (store_instr reg_class:$rs2, GPR:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-  def : Pat<(store type:$rs2, AddrFI:$rs1),
-            (store_instr reg_class:$rs2, AddrFI:$rs1,
-             (mask_type zero_reg),
-             VLMax, sew)>;
-}
-
-foreach vti = AllVectors in
-{
-  defm : pat_load_store<vti.Vector, vti.Mask,
-                        vti.SEW, vti.LMul, vti.RegClass>;
+  defm PseudoVLE # eew : VPseudoUSLoad;
+  defm PseudoVSE # eew : VPseudoUSStore;
 }
 
 //===----------------------------------------------------------------------===//
@@ -1078,6 +1116,12 @@ defm PseudoVFRSUB : VPseudoBinaryV_VX</*IsFloat=*/1>;
 let Predicates = [HasStdExtV] in {
 
 // Whole-register vector patterns.
+
+// 7.4. Vector Unit-Stride Instructions
+defm "" : VPatUSLoadStoreSDNodes<GPR>;
+defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
+
+// 12.1. Vector Single-Width Integer Add and Subtract
 defm "" : VPatBinarySDNode<add, "PseudoVADD">;
 
 //===----------------------------------------------------------------------===//

llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir

Lines changed: 13 additions & 14 deletions
@@ -27,27 +27,26 @@ body: |
     %2:gpr = COPY $x12
     %1:gpr = COPY $x11
     %0:gpr = COPY $x10
-    %5:vr = IMPLICIT_DEF
-    %4:vr = PseudoVLE64_V_M1 %5, %1, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-    %7:vr = IMPLICIT_DEF
-    %6:vr = PseudoVLE64_V_M1 %7, %2, $noreg, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-    %8:vr = PseudoVADD_VV_M1 killed %4, killed %6, %3, 64, implicit $vl, implicit $vtype
-    PseudoVSE64_V_M1 killed %8, %0, $noreg, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+    %4:vr = PseudoVLE64_V_M1 %1, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+    %5:vr = PseudoVLE64_V_M1 %2, %3, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+    %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, %3, 64, implicit $vl, implicit $vtype
+    PseudoVSE64_V_M1 killed %6, %0, %3, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
     PseudoRET
 
 ...
 
 # POST-INSERTER: %0:gpr = COPY $x13
-# POST-INSERTER: %4:vr = IMPLICIT_DEF
+# POST-INSERTER: %1:gpr = COPY $x12
+# POST-INSERTER: %2:gpr = COPY $x11
+# POST-INSERTER: %3:gpr = COPY $x10
+# POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-# POST-INSERTER: %6:vr = IMPLICIT_DEF
+# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, -1, implicit $vl, implicit $vtype
 # POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %8:vr = PseudoVADD_VV_M1 killed %5, killed %7, $noreg, -1, implicit $vl, implicit $vtype
-# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: PseudoVSE64_V_M1 killed %8, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
 # CODEGEN: vsetvli a3, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v v25, (a1)

llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll

Lines changed: 10 additions & 14 deletions
@@ -20,20 +20,16 @@ define void @vadd_vint64m1(
   ret void
 }
 
-; PRE-INSERTER: %4:vr = IMPLICIT_DEF
-; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; PRE-INSERTER: %6:vr = IMPLICIT_DEF
-; PRE-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; PRE-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $x0, 64, implicit $vl, implicit $vtype
-; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
+; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-; POST-INSERTER: %4:vr = IMPLICIT_DEF
+; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; POST-INSERTER: %6:vr = IMPLICIT_DEF
+; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype
 ; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
-; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 killed %3, killed %5, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
