Skip to content

Commit f1fd5c9

Browse files
committed
[RISCV] Remove pseudos for whole register load, store, and move.
The MC layer instructions have the correct register classes, and the pseudos don't have any additional operands. So there doesn't seem to be any reason for them to exist. The pseudos were incorrectly going through code in RISCVMCInstLower that converted LMUL>1 register classes to LMUL1 register class. This makes the MCInst technically malformed, and prevented the vl2r.v, vl4r.v, and vl8r.v InstAliases from matching. This accounts for all of the .ll test diffs. Differential Revision: https://reviews.llvm.org/D139511
1 parent 50daddf commit f1fd5c9

File tree

81 files changed

+586
-610
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

81 files changed

+586
-610
lines changed

llvm/lib/Target/RISCV/RISCVInstrInfo.cpp

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -301,69 +301,69 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
301301
Opc = RISCV::FSGNJ_D;
302302
IsScalableVector = false;
303303
} else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
304-
Opc = RISCV::PseudoVMV1R_V;
304+
Opc = RISCV::VMV1R_V;
305305
LMul = RISCVII::LMUL_1;
306306
} else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
307-
Opc = RISCV::PseudoVMV2R_V;
307+
Opc = RISCV::VMV2R_V;
308308
LMul = RISCVII::LMUL_2;
309309
} else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
310-
Opc = RISCV::PseudoVMV4R_V;
310+
Opc = RISCV::VMV4R_V;
311311
LMul = RISCVII::LMUL_4;
312312
} else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
313-
Opc = RISCV::PseudoVMV8R_V;
313+
Opc = RISCV::VMV8R_V;
314314
LMul = RISCVII::LMUL_8;
315315
} else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
316-
Opc = RISCV::PseudoVMV1R_V;
316+
Opc = RISCV::VMV1R_V;
317317
SubRegIdx = RISCV::sub_vrm1_0;
318318
NF = 2;
319319
LMul = RISCVII::LMUL_1;
320320
} else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
321-
Opc = RISCV::PseudoVMV2R_V;
321+
Opc = RISCV::VMV2R_V;
322322
SubRegIdx = RISCV::sub_vrm2_0;
323323
NF = 2;
324324
LMul = RISCVII::LMUL_2;
325325
} else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
326-
Opc = RISCV::PseudoVMV4R_V;
326+
Opc = RISCV::VMV4R_V;
327327
SubRegIdx = RISCV::sub_vrm4_0;
328328
NF = 2;
329329
LMul = RISCVII::LMUL_4;
330330
} else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
331-
Opc = RISCV::PseudoVMV1R_V;
331+
Opc = RISCV::VMV1R_V;
332332
SubRegIdx = RISCV::sub_vrm1_0;
333333
NF = 3;
334334
LMul = RISCVII::LMUL_1;
335335
} else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
336-
Opc = RISCV::PseudoVMV2R_V;
336+
Opc = RISCV::VMV2R_V;
337337
SubRegIdx = RISCV::sub_vrm2_0;
338338
NF = 3;
339339
LMul = RISCVII::LMUL_2;
340340
} else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
341-
Opc = RISCV::PseudoVMV1R_V;
341+
Opc = RISCV::VMV1R_V;
342342
SubRegIdx = RISCV::sub_vrm1_0;
343343
NF = 4;
344344
LMul = RISCVII::LMUL_1;
345345
} else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
346-
Opc = RISCV::PseudoVMV2R_V;
346+
Opc = RISCV::VMV2R_V;
347347
SubRegIdx = RISCV::sub_vrm2_0;
348348
NF = 4;
349349
LMul = RISCVII::LMUL_2;
350350
} else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
351-
Opc = RISCV::PseudoVMV1R_V;
351+
Opc = RISCV::VMV1R_V;
352352
SubRegIdx = RISCV::sub_vrm1_0;
353353
NF = 5;
354354
LMul = RISCVII::LMUL_1;
355355
} else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
356-
Opc = RISCV::PseudoVMV1R_V;
356+
Opc = RISCV::VMV1R_V;
357357
SubRegIdx = RISCV::sub_vrm1_0;
358358
NF = 6;
359359
LMul = RISCVII::LMUL_1;
360360
} else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
361-
Opc = RISCV::PseudoVMV1R_V;
361+
Opc = RISCV::VMV1R_V;
362362
SubRegIdx = RISCV::sub_vrm1_0;
363363
NF = 7;
364364
LMul = RISCVII::LMUL_1;
365365
} else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
366-
Opc = RISCV::PseudoVMV1R_V;
366+
Opc = RISCV::VMV1R_V;
367367
SubRegIdx = RISCV::sub_vrm1_0;
368368
NF = 8;
369369
LMul = RISCVII::LMUL_1;
@@ -488,13 +488,13 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
488488
Opcode = RISCV::FSD;
489489
IsScalableVector = false;
490490
} else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
491-
Opcode = RISCV::PseudoVSPILL_M1;
491+
Opcode = RISCV::VS1R_V;
492492
} else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
493-
Opcode = RISCV::PseudoVSPILL_M2;
493+
Opcode = RISCV::VS2R_V;
494494
} else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
495-
Opcode = RISCV::PseudoVSPILL_M4;
495+
Opcode = RISCV::VS4R_V;
496496
} else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
497-
Opcode = RISCV::PseudoVSPILL_M8;
497+
Opcode = RISCV::VS8R_V;
498498
} else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
499499
Opcode = RISCV::PseudoVSPILL2_M1;
500500
else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
@@ -571,13 +571,13 @@ void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
571571
Opcode = RISCV::FLD;
572572
IsScalableVector = false;
573573
} else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
574-
Opcode = RISCV::PseudoVRELOAD_M1;
574+
Opcode = RISCV::VL1RE8_V;
575575
} else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
576-
Opcode = RISCV::PseudoVRELOAD_M2;
576+
Opcode = RISCV::VL2RE8_V;
577577
} else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
578-
Opcode = RISCV::PseudoVRELOAD_M4;
578+
Opcode = RISCV::VL4RE8_V;
579579
} else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
580-
Opcode = RISCV::PseudoVRELOAD_M8;
580+
Opcode = RISCV::VL8RE8_V;
581581
} else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
582582
Opcode = RISCV::PseudoVRELOAD2_M1;
583583
else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -5007,16 +5007,6 @@ let Predicates = [HasVInstructions] in {
50075007
//===----------------------------------------------------------------------===//
50085008
// Pseudo Instructions for CodeGen
50095009
//===----------------------------------------------------------------------===//
5010-
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
5011-
def PseudoVMV1R_V : VPseudo<VMV1R_V, V_M1, (outs VR:$vd), (ins VR:$vs2)>,
5012-
VMVRSched<1>;
5013-
def PseudoVMV2R_V : VPseudo<VMV2R_V, V_M2, (outs VRM2:$vd), (ins VRM2:$vs2)>,
5014-
VMVRSched<2>;
5015-
def PseudoVMV4R_V : VPseudo<VMV4R_V, V_M4, (outs VRM4:$vd), (ins VRM4:$vs2)>,
5016-
VMVRSched<4>;
5017-
def PseudoVMV8R_V : VPseudo<VMV8R_V, V_M8, (outs VRM8:$vd), (ins VRM8:$vs2)>,
5018-
VMVRSched<8>;
5019-
}
50205010

50215011
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
50225012
def PseudoReadVLENB : Pseudo<(outs GPR:$rd), (ins),
@@ -5028,20 +5018,6 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
50285018
Uses = [VL] in
50295019
def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;
50305020

5031-
let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
5032-
def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;
5033-
def PseudoVSPILL_M2 : VPseudo<VS2R_V, V_M2, (outs), (ins VRM2:$rs1, GPR:$rs2)>;
5034-
def PseudoVSPILL_M4 : VPseudo<VS4R_V, V_M4, (outs), (ins VRM4:$rs1, GPR:$rs2)>;
5035-
def PseudoVSPILL_M8 : VPseudo<VS8R_V, V_M8, (outs), (ins VRM8:$rs1, GPR:$rs2)>;
5036-
}
5037-
5038-
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 1 in {
5039-
def PseudoVRELOAD_M1 : VPseudo<VL1RE8_V, V_M1, (outs VR:$rs1), (ins GPR:$rs2)>;
5040-
def PseudoVRELOAD_M2 : VPseudo<VL2RE8_V, V_M2, (outs VRM2:$rs1), (ins GPR:$rs2)>;
5041-
def PseudoVRELOAD_M4 : VPseudo<VL4RE8_V, V_M4, (outs VRM4:$rs1), (ins GPR:$rs2)>;
5042-
def PseudoVRELOAD_M8 : VPseudo<VL8RE8_V, V_M8, (outs VRM8:$rs1), (ins GPR:$rs2)>;
5043-
}
5044-
50455021
foreach lmul = MxList in {
50465022
foreach nf = NFSet<lmul>.L in {
50475023
defvar vreg = SegRegClass<lmul, nf>.RC;

llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ define void @last_chance_recoloring_failure() {
6464
; CHECK-NEXT: slli a1, a1, 4
6565
; CHECK-NEXT: add a1, sp, a1
6666
; CHECK-NEXT: addi a1, a1, 16
67-
; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
67+
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
6868
; CHECK-NEXT: vfwsub.wv v16, v8, v24
6969
; CHECK-NEXT: addi a1, sp, 16
7070
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -75,9 +75,9 @@ define void @last_chance_recoloring_failure() {
7575
; CHECK-NEXT: slli a0, a0, 4
7676
; CHECK-NEXT: add a0, sp, a0
7777
; CHECK-NEXT: addi a0, a0, 16
78-
; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
78+
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
7979
; CHECK-NEXT: addi a0, sp, 16
80-
; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
80+
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
8181
; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t
8282
; CHECK-NEXT: vse32.v v8, (a0)
8383
; CHECK-NEXT: csrr a0, vlenb
@@ -135,7 +135,7 @@ define void @last_chance_recoloring_failure() {
135135
; SUBREGLIVENESS-NEXT: add a1, a1, a2
136136
; SUBREGLIVENESS-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
137137
; SUBREGLIVENESS-NEXT: addi a1, sp, 16
138-
; SUBREGLIVENESS-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload
138+
; SUBREGLIVENESS-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
139139
; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v20
140140
; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu
141141
; SUBREGLIVENESS-NEXT: vssubu.vv v16, v16, v8, v0.t

llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1392,7 +1392,7 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
13921392
; RV32-NEXT: vand.vv v16, v16, v24
13931393
; RV32-NEXT: vor.vv v16, v16, v0
13941394
; RV32-NEXT: addi a5, sp, 16
1395-
; RV32-NEXT: vl8re8.v v0, (a5) # Unknown-size Folded Reload
1395+
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
13961396
; RV32-NEXT: vor.vv v16, v16, v0
13971397
; RV32-NEXT: addi a5, sp, 16
13981398
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
@@ -1408,7 +1408,7 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
14081408
; RV32-NEXT: vlse64.v v16, (a3), zero
14091409
; RV32-NEXT: vor.vv v8, v0, v8
14101410
; RV32-NEXT: addi a0, sp, 16
1411-
; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
1411+
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
14121412
; RV32-NEXT: vor.vv v8, v8, v24
14131413
; RV32-NEXT: vsrl.vi v24, v8, 4
14141414
; RV32-NEXT: vand.vv v24, v24, v16

llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -528,7 +528,7 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
528528
; RV32-NEXT: vand.vv v16, v16, v24
529529
; RV32-NEXT: vor.vv v16, v16, v0
530530
; RV32-NEXT: addi a4, sp, 16
531-
; RV32-NEXT: vl8re8.v v0, (a4) # Unknown-size Folded Reload
531+
; RV32-NEXT: vl8r.v v0, (a4) # Unknown-size Folded Reload
532532
; RV32-NEXT: vor.vv v16, v16, v0
533533
; RV32-NEXT: addi a4, sp, 16
534534
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
@@ -543,7 +543,7 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
543543
; RV32-NEXT: vor.vv v8, v8, v24
544544
; RV32-NEXT: vor.vv v8, v16, v8
545545
; RV32-NEXT: addi a0, sp, 16
546-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
546+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
547547
; RV32-NEXT: vor.vv v8, v8, v16
548548
; RV32-NEXT: csrr a0, vlenb
549549
; RV32-NEXT: slli a0, a0, 3

llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1081,10 +1081,10 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
10811081
; RV32-NEXT: slli a0, a0, 4
10821082
; RV32-NEXT: add a0, sp, a0
10831083
; RV32-NEXT: addi a0, a0, 16
1084-
; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
1084+
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
10851085
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
10861086
; RV32-NEXT: addi a0, sp, 16
1087-
; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
1087+
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
10881088
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
10891089
; RV32-NEXT: csrr a0, vlenb
10901090
; RV32-NEXT: slli a0, a0, 4
@@ -1104,17 +1104,17 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
11041104
; RV32-NEXT: slli a0, a0, 3
11051105
; RV32-NEXT: add a0, sp, a0
11061106
; RV32-NEXT: addi a0, a0, 16
1107-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1107+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
11081108
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
11091109
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
11101110
; RV32-NEXT: addi a0, sp, 16
1111-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1111+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
11121112
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
11131113
; RV32-NEXT: csrr a0, vlenb
11141114
; RV32-NEXT: slli a0, a0, 4
11151115
; RV32-NEXT: add a0, sp, a0
11161116
; RV32-NEXT: addi a0, a0, 16
1117-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1117+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
11181118
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
11191119
; RV32-NEXT: csrr a0, vlenb
11201120
; RV32-NEXT: li a1, 24
@@ -1151,7 +1151,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
11511151
; RV64-NEXT: vsll.vx v16, v16, a4, v0.t
11521152
; RV64-NEXT: vor.vv v16, v24, v16, v0.t
11531153
; RV64-NEXT: addi a5, sp, 16
1154-
; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload
1154+
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
11551155
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
11561156
; RV64-NEXT: addi a5, sp, 16
11571157
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
@@ -1166,7 +1166,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
11661166
; RV64-NEXT: vor.vv v8, v8, v16, v0.t
11671167
; RV64-NEXT: vor.vv v8, v8, v24, v0.t
11681168
; RV64-NEXT: addi a0, sp, 16
1169-
; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1169+
; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
11701170
; RV64-NEXT: vor.vv v8, v16, v8, v0.t
11711171
; RV64-NEXT: csrr a0, vlenb
11721172
; RV64-NEXT: slli a0, a0, 3
@@ -1211,7 +1211,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
12111211
; RV32-NEXT: vsll.vi v24, v24, 8
12121212
; RV32-NEXT: vor.vv v24, v0, v24
12131213
; RV32-NEXT: addi a0, sp, 16
1214-
; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload
1214+
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
12151215
; RV32-NEXT: vor.vv v24, v0, v24
12161216
; RV32-NEXT: addi a0, sp, 16
12171217
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
@@ -1226,7 +1226,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
12261226
; RV32-NEXT: vor.vv v8, v16, v8
12271227
; RV32-NEXT: vor.vv v8, v8, v24
12281228
; RV32-NEXT: addi a0, sp, 16
1229-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1229+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
12301230
; RV32-NEXT: vor.vv v8, v16, v8
12311231
; RV32-NEXT: csrr a0, vlenb
12321232
; RV32-NEXT: slli a0, a0, 3
@@ -1321,10 +1321,10 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
13211321
; RV32-NEXT: slli a0, a0, 4
13221322
; RV32-NEXT: add a0, sp, a0
13231323
; RV32-NEXT: addi a0, a0, 16
1324-
; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
1324+
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
13251325
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
13261326
; RV32-NEXT: addi a0, sp, 16
1327-
; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
1327+
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
13281328
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
13291329
; RV32-NEXT: csrr a0, vlenb
13301330
; RV32-NEXT: slli a0, a0, 4
@@ -1344,17 +1344,17 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
13441344
; RV32-NEXT: slli a0, a0, 3
13451345
; RV32-NEXT: add a0, sp, a0
13461346
; RV32-NEXT: addi a0, a0, 16
1347-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1347+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
13481348
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
13491349
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
13501350
; RV32-NEXT: addi a0, sp, 16
1351-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1351+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
13521352
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
13531353
; RV32-NEXT: csrr a0, vlenb
13541354
; RV32-NEXT: slli a0, a0, 4
13551355
; RV32-NEXT: add a0, sp, a0
13561356
; RV32-NEXT: addi a0, a0, 16
1357-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1357+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
13581358
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
13591359
; RV32-NEXT: csrr a0, vlenb
13601360
; RV32-NEXT: li a1, 24
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
13911391
; RV64-NEXT: vsll.vx v16, v16, a4, v0.t
13921392
; RV64-NEXT: vor.vv v16, v24, v16, v0.t
13931393
; RV64-NEXT: addi a5, sp, 16
1394-
; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload
1394+
; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
13951395
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
13961396
; RV64-NEXT: addi a5, sp, 16
13971397
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
@@ -1406,7 +1406,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
14061406
; RV64-NEXT: vor.vv v8, v8, v16, v0.t
14071407
; RV64-NEXT: vor.vv v8, v8, v24, v0.t
14081408
; RV64-NEXT: addi a0, sp, 16
1409-
; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1409+
; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
14101410
; RV64-NEXT: vor.vv v8, v16, v8, v0.t
14111411
; RV64-NEXT: csrr a0, vlenb
14121412
; RV64-NEXT: slli a0, a0, 3
@@ -1451,7 +1451,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
14511451
; RV32-NEXT: vsll.vi v24, v24, 8
14521452
; RV32-NEXT: vor.vv v24, v0, v24
14531453
; RV32-NEXT: addi a0, sp, 16
1454-
; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload
1454+
; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
14551455
; RV32-NEXT: vor.vv v24, v0, v24
14561456
; RV32-NEXT: addi a0, sp, 16
14571457
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
@@ -1466,7 +1466,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
14661466
; RV32-NEXT: vor.vv v8, v16, v8
14671467
; RV32-NEXT: vor.vv v8, v8, v24
14681468
; RV32-NEXT: addi a0, sp, 16
1469-
; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1469+
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
14701470
; RV32-NEXT: vor.vv v8, v16, v8
14711471
; RV32-NEXT: csrr a0, vlenb
14721472
; RV32-NEXT: slli a0, a0, 3
@@ -1555,12 +1555,12 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
15551555
; CHECK-NEXT: slli a0, a0, 3
15561556
; CHECK-NEXT: add a0, sp, a0
15571557
; CHECK-NEXT: addi a0, a0, 16
1558-
; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
1558+
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
15591559
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
15601560
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
15611561
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
15621562
; CHECK-NEXT: addi a0, sp, 16
1563-
; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
1563+
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
15641564
; CHECK-NEXT: csrr a0, vlenb
15651565
; CHECK-NEXT: slli a0, a0, 4
15661566
; CHECK-NEXT: add sp, sp, a0

0 commit comments

Comments (0)