Commit 9105260

Update vmsge.ll and vmsgeu.ll
Created using spr 1.3.6-beta.1
1 parent f36c414 commit 9105260
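
The vector-vector (vv) hunks in vmsge.ll below all make the same change to the expected code: instead of computing the unmasked compare into a temporary mask register and then copying that register into v0, the expected output now saves the incoming mask out of v0 first and writes the compare result directly into v0, dropping one vector register copy (vmv1r.v) from each masked vv test; the final vector-scalar (vx) hunk only renumbers registers. A before/after sketch of the pattern, using the register names and vsetvli operands from the first (nxv1i8) hunk; this is an illustration distilled from the diff, not additional generated output:

# Old expansion
vsetvli zero, a0, e8, mf8, ta, mu
vmsle.vv v8, v9, v8           # unmasked compare into a temporary register
vmv1r.v v11, v0               # save the incoming mask
vmv1r.v v0, v8                # extra move of the compare result into v0
vmsle.vv v11, v10, v9, v0.t   # masked compare under the freshly built mask
vmv1r.v v0, v11

# New expansion
vmv1r.v v11, v0               # save the incoming mask first
vsetvli zero, a0, e8, mf8, ta, mu
vmsle.vv v0, v9, v8           # unmasked compare written straight into v0
vmsle.vv v11, v10, v9, v0.t
vmv1r.v v0, v11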

2 files changed: 96 additions, 132 deletions


llvm/test/CodeGen/RISCV/rvv/vmsge.ll

Lines changed: 48 additions & 66 deletions
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
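
For context, each of these masked tests builds a mask with the unmasked vmsge intrinsic and then calls the masked form under that mask. A sketch of the IR shape for the nxv1i8 case follows; the operand order of the intrinsic calls is assumed from the declarations in the hunk headers and the define's argument list, not copied verbatim from the test file:

entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
      <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
      <vscale x 1 x i1> %0, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3,
      <vscale x 1 x i1> %mask, iXLen %4)
  ret <vscale x 1 x i1> %a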
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
@@ -190,10 +187,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv.v.v v0, v11
 ; CHECK-NEXT: ret
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v10, v8
+; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -294,12 +289,11 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
 define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v12, v8
+; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -346,10 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
@@ -398,10 +391,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
@@ -450,10 +442,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv.v.v v0, v11
 ; CHECK-NEXT: ret
@@ -502,12 +493,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v10, v8
+; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -554,12 +544,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
 define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v12, v8
+; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -606,10 +595,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v11
 ; CHECK-NEXT: ret
@@ -658,10 +646,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv.v.v v0, v11
 ; CHECK-NEXT: ret
@@ -710,12 +697,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v10, v8
+; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -762,12 +748,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
 define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v12, v8
+; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -814,10 +799,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
 define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v8, v9, v8
 ; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmsle.vv v0, v9, v8
 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
 ; CHECK-NEXT: vmv.v.v v0, v11
 ; CHECK-NEXT: ret
@@ -866,12 +850,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
 define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v10, v8
+; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -918,12 +901,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
 define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmsle.vv v0, v12, v8
+; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
 ; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
 ; CHECK-NEXT: ret
 entry:
   %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
@@ -1708,11 +1690,11 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
 ; RV32-NEXT: sw a0, 8(sp)
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; RV32-NEXT: vlse64.v v11, (a0), zero
-; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vmv1r.v v11, v0
 ; RV32-NEXT: vmv1r.v v0, v9
-; RV32-NEXT: vmsle.vv v10, v11, v8, v0.t
-; RV32-NEXT: vmv.v.v v0, v10
+; RV32-NEXT: vmsle.vv v11, v10, v8, v0.t
+; RV32-NEXT: vmv.v.v v0, v11
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
