Skip to content

Commit 2b8f82b

Browse files
authored
[RISCV] Remove implicit $vl def on vleNff pseudos (#143935)
In #90049 we removed the side effect flag on the vleNff pseudos with the reasoning that we modelled the effect of setting vl as an output operand. This patch extends that further by removing the implicit def on vl, inserting it back in RISCVInsertVSETVLI when we also emit the PseudoReadVL. The motivation for this is to make it easier to handle vleff in more places in RISCVVectorPeephole in a follow up patch, which in turn will make migrating the last vmerge peephole over from RISCVISelDAGToDAG easier. Some of these tests claim that the vleff shouldn't be deleted when none of its values are used, but these are from the initial commit in 3b5430e. I'm not sure if these still hold today? This also moves the fault-only-first predicate to RISCVInstrPredicates.td since we can't rely on the implicit vl operand anymore.
1 parent 2ee8fdb commit 2b8f82b

File tree

10 files changed

+17
-35
lines changed

10 files changed

+17
-35
lines changed

llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1101,7 +1101,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
11011101
if (RISCVII::hasRoundModeOp(TSFlags))
11021102
--NumOps;
11031103

1104-
bool hasVLOutput = RISCV::isFaultFirstLoad(*MI);
1104+
bool hasVLOutput = RISCVInstrInfo::isFaultOnlyFirstLoad(*MI);
11051105
for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
11061106
const MachineOperand &MO = MI->getOperand(OpNo);
11071107
// Skip vl output. It should be the second output.

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1253,7 +1253,7 @@ void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
12531253
return;
12541254
}
12551255

1256-
if (RISCV::isFaultFirstLoad(MI)) {
1256+
if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
12571257
// Update AVL to vl-output of the fault first load.
12581258
assert(MI.getOperand(1).getReg().isVirtual());
12591259
if (LIS) {
@@ -1756,7 +1756,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
17561756
void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
17571757
for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
17581758
MachineInstr &MI = *I++;
1759-
if (RISCV::isFaultFirstLoad(MI)) {
1759+
if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
17601760
Register VLOutput = MI.getOperand(1).getReg();
17611761
assert(VLOutput.isVirtual());
17621762
if (!MI.getOperand(1).isDead()) {
@@ -1774,6 +1774,7 @@ void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
17741774
}
17751775
// We don't use the vl output of the VLEFF/VLSEGFF anymore.
17761776
MI.getOperand(1).setReg(RISCV::X0);
1777+
MI.addRegisterDefined(RISCV::VL, MRI->getTargetRegisterInfo());
17771778
}
17781779
}
17791780
}

llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4568,11 +4568,6 @@ RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
45684568
}
45694569
}
45704570

4571-
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
4572-
return MI.getNumExplicitDefs() == 2 &&
4573-
MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) && !MI.isInlineAsm();
4574-
}
4575-
45764571
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
45774572
int16_t MI1FrmOpIdx =
45784573
RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);

llvm/lib/Target/RISCV/RISCVInstrInfo.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -357,8 +357,6 @@ bool isRVVSpill(const MachineInstr &MI);
357357
std::optional<std::pair<unsigned, unsigned>>
358358
isRVVSpillForZvlsseg(unsigned Opcode);
359359

360-
bool isFaultFirstLoad(const MachineInstr &MI);
361-
362360
// Return true if both input instructions have equal rounding mode. If at least
363361
// one of the instructions does not have rounding mode, false will be returned.
364362
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2);

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -6144,8 +6144,6 @@ defm PseudoVSUX : VPseudoIStore<Ordered=false>;
61446144
// 7.7. Unit-stride Fault-Only-First Loads
61456145
//===----------------------------------------------------------------------===//
61466146

6147-
// vleff may update VL register
6148-
let Defs = [VL] in
61496147
defm PseudoVL : VPseudoFFLoad;
61506148

61516149
//===----------------------------------------------------------------------===//
@@ -6159,11 +6157,7 @@ defm PseudoVSSEG : VPseudoUSSegStore;
61596157
defm PseudoVSSSEG : VPseudoSSegStore;
61606158
defm PseudoVSOXSEG : VPseudoISegStore<Ordered=true>;
61616159
defm PseudoVSUXSEG : VPseudoISegStore<Ordered=false>;
6162-
6163-
// vlseg<nf>e<eew>ff.v may update VL register
6164-
let Defs = [VL] in {
61656160
defm PseudoVLSEG : VPseudoUSSegLoadFF;
6166-
}
61676161

61686162
//===----------------------------------------------------------------------===//
61696163
// 11. Vector Integer Arithmetic Instructions

llvm/lib/Target/RISCV/RISCVInstrPredicates.td

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,13 @@ def isVSlideInstr
129129
!instances<Pseudo>("^PseudoVSLIDEUP_VI.*")
130130
])>>>;
131131

132+
def isFaultOnlyFirstLoad
133+
: TIIPredicate<"isFaultOnlyFirstLoad",
134+
MCReturnStatement<
135+
CheckOpcode<
136+
!instances<Pseudo>(
137+
"^PseudoVL(SEG[2-8])?E(8|16|32|64)FF_V.*")>>>;
138+
132139
def isNonZeroLoadImmediate
133140
: TIIPredicate<"isNonZeroLoadImmediate",
134141
MCReturnStatement<CheckAll<[

llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
1414
; CHECK-NEXT: {{ $}}
1515
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
1616
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
17-
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
17+
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
1818
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
1919
; CHECK-NEXT: PseudoRET implicit $x10
2020
entry:
@@ -31,7 +31,7 @@ define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl) {
3131
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
3232
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
3333
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
34-
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
34+
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
3535
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
3636
; CHECK-NEXT: PseudoRET implicit $x10
3737
entry:
@@ -50,7 +50,7 @@ define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale
5050
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
5151
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
5252
; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
53-
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
53+
; CHECK-NEXT: [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */ :: (load unknown-size from %ir.p, align 1)
5454
; CHECK-NEXT: $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
5555
; CHECK-NEXT: PseudoRET implicit $x10
5656
entry:
@@ -66,7 +66,7 @@ define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
6666
; CHECK-NEXT: {{ $}}
6767
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
6868
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
69-
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
69+
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.base, align 1)
7070
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
7171
; CHECK-NEXT: PseudoRET implicit $x10
7272
entry:
@@ -83,7 +83,7 @@ define i64 @test_vlseg2ff_nxv8i8_tu(target("riscv.vector.tuple", <vscale x 8 x i
8383
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
8484
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
8585
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrn2m1 = COPY $v8_v9
86-
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
86+
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.base, align 1)
8787
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
8888
; CHECK-NEXT: PseudoRET implicit $x10
8989
entry:
@@ -102,7 +102,7 @@ define i64 @test_vlseg2ff_nxv8i8_mask(target("riscv.vector.tuple", <vscale x 8 x
102102
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
103103
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrn2m1nov0 = COPY $v8_v9
104104
; CHECK-NEXT: [[COPY4:%[0-9]+]]:vmv0 = COPY [[COPY1]]
105-
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
105+
; CHECK-NEXT: [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], [[COPY4]], [[COPY]], 3 /* e8 */, 0 /* tu, mu */ :: (load unknown-size from %ir.base, align 1)
106106
; CHECK-NEXT: $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
107107
; CHECK-NEXT: PseudoRET implicit $x10
108108
entry:

llvm/test/CodeGen/RISCV/rvv/vleff.ll

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3016,12 +3016,9 @@ entry:
30163016
ret void
30173017
}
30183018

3019-
; Test with both outputs dead. Make sure the vleff isn't deleted.
30203019
define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, ptr %2) nounwind {
30213020
; CHECK-LABEL: intrinsic_vleff_dead_all:
30223021
; CHECK: # %bb.0: # %entry
3023-
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3024-
; CHECK-NEXT: vle64ff.v v8, (a0)
30253022
; CHECK-NEXT: ret
30263023
entry:
30273024
%a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
@@ -3034,8 +3031,6 @@ entry:
30343031
define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
30353032
; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
30363033
; CHECK: # %bb.0: # %entry
3037-
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3038-
; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
30393034
; CHECK-NEXT: ret
30403035
entry:
30413036
%a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(

llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,6 @@ entry:
6666
define void @test_vlseg2ff_dead_all(ptr %base, i32 %vl) {
6767
; CHECK-LABEL: test_vlseg2ff_dead_all:
6868
; CHECK: # %bb.0: # %entry
69-
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
70-
; CHECK-NEXT: vlseg2e16ff.v v8, (a0)
7169
; CHECK-NEXT: ret
7270
entry:
7371
tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i32 %vl, i32 4)
@@ -77,8 +75,6 @@ entry:
7775
define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
7876
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
7977
; CHECK: # %bb.0: # %entry
80-
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
81-
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
8278
; CHECK-NEXT: ret
8379
entry:
8480
tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i32} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1, i32 4)

llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,6 @@ entry:
6666
define void @test_vlseg2ff_dead_all(ptr %base, i64 %vl) {
6767
; CHECK-LABEL: test_vlseg2ff_dead_all:
6868
; CHECK: # %bb.0: # %entry
69-
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
70-
; CHECK-NEXT: vlseg2e16ff.v v8, (a0)
7169
; CHECK-NEXT: ret
7270
entry:
7371
tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %vl, i64 4)
@@ -77,8 +75,6 @@ entry:
7775
define void @test_vlseg2ff_mask_dead_all(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
7876
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
7977
; CHECK: # %bb.0: # %entry
80-
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
81-
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
8278
; CHECK-NEXT: ret
8379
entry:
8480
tail call {target("riscv.vector.tuple", <vscale x 32 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv32i8_2t.nxv16i1(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1, i64 4)

0 commit comments

Comments
 (0)