Commit 602efe5

[CodeGen] Clear InitUndef pass new register cache between pass runs
Multiple invocations of the pass could interfere with each other, preventing some undefs from being initialised.
1 parent 7cdd53d commit 602efe5

5 files changed, +91 −91 lines changed


llvm/lib/CodeGen/InitUndef.cpp

Lines changed: 1 addition & 0 deletions
@@ -272,6 +272,7 @@ bool InitUndef::runOnMachineFunction(MachineFunction &MF) {
   for (auto *DeadMI : DeadInsts)
     DeadMI->eraseFromParent();
   DeadInsts.clear();
+  NewRegs.clear();
 
   return Changed;
 }
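
The single added line above is the whole functional change: NewRegs is a container member of the pass object, so registers recorded while processing one machine function would otherwise survive into the next invocation. A minimal sketch of the pattern follows; ExampleInitPass and Cache are hypothetical names used only to illustrate the issue, not the actual InitUndef code.

// Hedged illustration only: ExampleInitPass and Cache are made-up names.
// The point is that MachineFunctionPass members outlive a single
// runOnMachineFunction call, so per-function caches must be reset.
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Register.h"

namespace {
class ExampleInitPass : public llvm::MachineFunctionPass {
  // Lives for the whole compilation, i.e. across every machine function
  // the pass manager hands to this pass object.
  llvm::SmallSet<llvm::Register, 8> Cache;

public:
  static char ID;
  ExampleInitPass() : llvm::MachineFunctionPass(ID) {}

  bool runOnMachineFunction(llvm::MachineFunction &MF) override {
    bool Changed = false;
    // ... scan MF, record newly created registers in Cache, and consult it
    // while rewriting undef operands ...

    // Without this, stale entries from a previous function can make later
    // lookups report work as already done, so some undefs are never
    // initialised -- the interference the commit message describes.
    Cache.clear();
    return Changed;
  }
};
char ExampleInitPass::ID = 0;
} // end anonymous namespace

The test updates below are the resulting codegen churn from the fix: mostly instruction reordering and register renumbering (for example v8 becoming v12), with no change in the operations performed.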

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll

Lines changed: 6 additions & 7 deletions
@@ -567,16 +567,15 @@ define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b,
 ; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 5
 ; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 6
-; CHECK-NEXT: vmv.s.x v10, a3
+; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 7
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vmv.s.x v12, a3
+; CHECK-NEXT: vslideup.vi v8, v12, 7
+; CHECK-NEXT: vadd.vv v8, v8, v10
 ; CHECK-NEXT: ret
   %vadd = add <8 x i32> %vin, <i32 1, i32 2, i32 3, i32 5, i32 undef, i32 undef, i32 undef, i32 undef>
   %e0 = add i32 %a, 23

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll

Lines changed: 6 additions & 6 deletions
@@ -3456,17 +3456,17 @@ define void @mulhu_v4i64(ptr %x) {
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: lui a1, %hi(.LCPI184_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
-; RV64-NEXT: vle64.v v10, (a1)
 ; RV64-NEXT: li a1, -1
 ; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: vmv.s.x v12, a1
+; RV64-NEXT: vmv.s.x v10, a1
+; RV64-NEXT: lui a1, %hi(.LCPI184_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vle64.v v12, (a1)
 ; RV64-NEXT: vmv.v.i v14, 0
 ; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vslideup.vi v14, v12, 2
+; RV64-NEXT: vslideup.vi v14, v10, 2
 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vmulhu.vv v10, v8, v10
+; RV64-NEXT: vmulhu.vv v10, v8, v12
 ; RV64-NEXT: vsub.vv v8, v8, v10
 ; RV64-NEXT: vmulhu.vv v8, v8, v14
 ; RV64-NEXT: vadd.vv v8, v8, v10

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Lines changed: 48 additions & 48 deletions
@@ -2474,9 +2474,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB35_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2519,8 +2519,8 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_8
 ; RV64ZVE32F-NEXT: j .LBB35_9
@@ -2624,9 +2624,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB36_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2669,8 +2669,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_8
 ; RV64ZVE32F-NEXT: j .LBB36_9
@@ -2779,9 +2779,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB37_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2827,8 +2827,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_8
 ; RV64ZVE32F-NEXT: j .LBB37_9
@@ -2936,9 +2936,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB38_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -2981,8 +2981,8 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_8
 ; RV64ZVE32F-NEXT: j .LBB38_9
@@ -3087,9 +3087,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB39_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -3132,8 +3132,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_8
 ; RV64ZVE32F-NEXT: j .LBB39_9
@@ -3243,9 +3243,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: lw a3, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a3
+; RV64ZVE32F-NEXT: vmv.s.x v12, a3
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB40_9: # %else14
 ; RV64ZVE32F-NEXT: andi a3, a2, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -3291,8 +3291,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: lw a3, 0(a3)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a3
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vmv.s.x v12, a3
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a3, a2, 32
 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_8
 ; RV64ZVE32F-NEXT: j .LBB40_9
@@ -8155,9 +8155,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB74_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8200,8 +8200,8 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_8
 ; RV64ZVE32F-NEXT: j .LBB74_9
@@ -8305,9 +8305,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB75_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8350,8 +8350,8 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_8
 ; RV64ZVE32F-NEXT: j .LBB75_9
@@ -8460,9 +8460,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB76_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8508,8 +8508,8 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_8
 ; RV64ZVE32F-NEXT: j .LBB76_9
@@ -8617,9 +8617,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB77_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -8662,8 +8662,8 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_8
 ; RV64ZVE32F-NEXT: j .LBB77_9
@@ -8768,9 +8768,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB78_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -8813,8 +8813,8 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_8
 ; RV64ZVE32F-NEXT: j .LBB78_9
@@ -8924,9 +8924,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB79_9: # %else14
 ; RV64ZVE32F-NEXT: andi a3, a2, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -8972,8 +8972,8 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a3, a2, 32
 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_8
 ; RV64ZVE32F-NEXT: j .LBB79_9
