@@ -1138,11 +1138,11 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB18_7
; RV64ZVE32F-NEXT: .LBB18_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -1271,11 +1271,11 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB19_7
; RV64ZVE32F-NEXT: .LBB19_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -1408,12 +1408,12 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB20_7
; RV64ZVE32F-NEXT: .LBB20_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2043,11 +2043,11 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB29_7
; RV64ZVE32F-NEXT: .LBB29_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2175,11 +2175,11 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB30_7
; RV64ZVE32F-NEXT: .LBB30_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2314,12 +2314,12 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB31_7
; RV64ZVE32F-NEXT: .LBB31_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2451,11 +2451,11 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB32_7
; RV64ZVE32F-NEXT: .LBB32_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2584,11 +2584,11 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB33_7
; RV64ZVE32F-NEXT: .LBB33_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -2724,12 +2724,12 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB34_7
; RV64ZVE32F-NEXT: .LBB34_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a3, v11
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 32
@@ -6393,11 +6393,11 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB58_7
; RV64ZVE32F-NEXT: .LBB58_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -6526,11 +6526,11 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB59_7
; RV64ZVE32F-NEXT: .LBB59_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -6663,12 +6663,12 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB60_7
; RV64ZVE32F-NEXT: .LBB60_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 1
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 32
@@ -7249,11 +7249,11 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB68_7
; RV64ZVE32F-NEXT: .LBB68_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
@@ -7385,11 +7385,11 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB69_7
; RV64ZVE32F-NEXT: .LBB69_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
@@ -7528,12 +7528,12 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB70_7
; RV64ZVE32F-NEXT: .LBB70_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
@@ -7669,11 +7669,11 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB71_7
; RV64ZVE32F-NEXT: .LBB71_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
@@ -7806,11 +7806,11 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB72_7
; RV64ZVE32F-NEXT: .LBB72_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v11
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
@@ -7950,12 +7950,12 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a3, a2, 16
; RV64ZVE32F-NEXT: beqz a3, .LBB73_7
; RV64ZVE32F-NEXT: .LBB73_14: # %cond.store7
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a3, v11
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
- ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
+ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)