Skip to content

Commit 5f7c3b8

Browse files
Author: SahilPatidar
Commit message: "update CodeGen tests"
1 parent: 9fc3b2e — commit 5f7c3b8

File tree

5 files changed

+61
-75
lines changed

5 files changed

+61
-75
lines changed

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll

Lines changed: 42 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -549,36 +549,36 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
549549
; CHECK-RV32-NEXT: # %bb.1:
550550
; CHECK-RV32-NEXT: li a3, 32
551551
; CHECK-RV32-NEXT: .LBB42_2:
552-
; CHECK-RV32-NEXT: mul a5, a3, a2
553-
; CHECK-RV32-NEXT: addi a6, a4, -32
554-
; CHECK-RV32-NEXT: sltu a4, a4, a6
555-
; CHECK-RV32-NEXT: addi a4, a4, -1
556-
; CHECK-RV32-NEXT: and a6, a4, a6
557-
; CHECK-RV32-NEXT: li a4, 16
558-
; CHECK-RV32-NEXT: add a5, a1, a5
559-
; CHECK-RV32-NEXT: bltu a6, a4, .LBB42_4
552+
; CHECK-RV32-NEXT: mul a6, a3, a2
553+
; CHECK-RV32-NEXT: addi a5, a4, -32
554+
; CHECK-RV32-NEXT: sltu a7, a4, a5
555+
; CHECK-RV32-NEXT: addi a7, a7, -1
556+
; CHECK-RV32-NEXT: and a7, a7, a5
557+
; CHECK-RV32-NEXT: li a5, 16
558+
; CHECK-RV32-NEXT: add a6, a1, a6
559+
; CHECK-RV32-NEXT: bltu a7, a5, .LBB42_4
560560
; CHECK-RV32-NEXT: # %bb.3:
561-
; CHECK-RV32-NEXT: li a6, 16
561+
; CHECK-RV32-NEXT: li a7, 16
562562
; CHECK-RV32-NEXT: .LBB42_4:
563563
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
564564
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4
565-
; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma
566-
; CHECK-RV32-NEXT: vlse64.v v16, (a5), a2, v0.t
567-
; CHECK-RV32-NEXT: addi a5, a3, -16
568-
; CHECK-RV32-NEXT: sltu a6, a3, a5
569-
; CHECK-RV32-NEXT: addi a6, a6, -1
570-
; CHECK-RV32-NEXT: and a5, a6, a5
571-
; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_6
565+
; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma
566+
; CHECK-RV32-NEXT: vlse64.v v16, (a6), a2, v0.t
567+
; CHECK-RV32-NEXT: addi a6, a3, -16
568+
; CHECK-RV32-NEXT: sltu a3, a3, a6
569+
; CHECK-RV32-NEXT: addi a3, a3, -1
570+
; CHECK-RV32-NEXT: and a3, a3, a6
571+
; CHECK-RV32-NEXT: bltu a4, a5, .LBB42_6
572572
; CHECK-RV32-NEXT: # %bb.5:
573-
; CHECK-RV32-NEXT: li a3, 16
573+
; CHECK-RV32-NEXT: li a4, 16
574574
; CHECK-RV32-NEXT: .LBB42_6:
575-
; CHECK-RV32-NEXT: mul a4, a3, a2
576-
; CHECK-RV32-NEXT: add a4, a1, a4
575+
; CHECK-RV32-NEXT: mul a5, a4, a2
576+
; CHECK-RV32-NEXT: add a5, a1, a5
577577
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
578578
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
579-
; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
580-
; CHECK-RV32-NEXT: vlse64.v v24, (a4), a2, v0.t
581579
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
580+
; CHECK-RV32-NEXT: vlse64.v v24, (a5), a2, v0.t
581+
; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma
582582
; CHECK-RV32-NEXT: vmv1r.v v0, v8
583583
; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t
584584
; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
@@ -599,36 +599,36 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
599599
; CHECK-RV64-NEXT: # %bb.1:
600600
; CHECK-RV64-NEXT: li a4, 32
601601
; CHECK-RV64-NEXT: .LBB42_2:
602-
; CHECK-RV64-NEXT: mul a5, a4, a2
603-
; CHECK-RV64-NEXT: addi a6, a3, -32
604-
; CHECK-RV64-NEXT: sltu a3, a3, a6
605-
; CHECK-RV64-NEXT: addi a3, a3, -1
606-
; CHECK-RV64-NEXT: and a6, a3, a6
607-
; CHECK-RV64-NEXT: li a3, 16
608-
; CHECK-RV64-NEXT: add a5, a1, a5
609-
; CHECK-RV64-NEXT: bltu a6, a3, .LBB42_4
602+
; CHECK-RV64-NEXT: mul a6, a4, a2
603+
; CHECK-RV64-NEXT: addi a5, a3, -32
604+
; CHECK-RV64-NEXT: sltu a7, a3, a5
605+
; CHECK-RV64-NEXT: addi a7, a7, -1
606+
; CHECK-RV64-NEXT: and a7, a7, a5
607+
; CHECK-RV64-NEXT: li a5, 16
608+
; CHECK-RV64-NEXT: add a6, a1, a6
609+
; CHECK-RV64-NEXT: bltu a7, a5, .LBB42_4
610610
; CHECK-RV64-NEXT: # %bb.3:
611-
; CHECK-RV64-NEXT: li a6, 16
611+
; CHECK-RV64-NEXT: li a7, 16
612612
; CHECK-RV64-NEXT: .LBB42_4:
613613
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
614614
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4
615-
; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma
616-
; CHECK-RV64-NEXT: vlse64.v v16, (a5), a2, v0.t
617-
; CHECK-RV64-NEXT: addi a5, a4, -16
618-
; CHECK-RV64-NEXT: sltu a6, a4, a5
619-
; CHECK-RV64-NEXT: addi a6, a6, -1
620-
; CHECK-RV64-NEXT: and a5, a6, a5
621-
; CHECK-RV64-NEXT: bltu a4, a3, .LBB42_6
615+
; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma
616+
; CHECK-RV64-NEXT: vlse64.v v16, (a6), a2, v0.t
617+
; CHECK-RV64-NEXT: addi a6, a4, -16
618+
; CHECK-RV64-NEXT: sltu a4, a4, a6
619+
; CHECK-RV64-NEXT: addi a4, a4, -1
620+
; CHECK-RV64-NEXT: and a4, a4, a6
621+
; CHECK-RV64-NEXT: bltu a3, a5, .LBB42_6
622622
; CHECK-RV64-NEXT: # %bb.5:
623-
; CHECK-RV64-NEXT: li a4, 16
623+
; CHECK-RV64-NEXT: li a3, 16
624624
; CHECK-RV64-NEXT: .LBB42_6:
625-
; CHECK-RV64-NEXT: mul a3, a4, a2
626-
; CHECK-RV64-NEXT: add a3, a1, a3
625+
; CHECK-RV64-NEXT: mul a5, a3, a2
626+
; CHECK-RV64-NEXT: add a5, a1, a5
627627
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
628628
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
629-
; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
630-
; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t
631629
; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma
630+
; CHECK-RV64-NEXT: vlse64.v v24, (a5), a2, v0.t
631+
; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
632632
; CHECK-RV64-NEXT: vmv1r.v v0, v8
633633
; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t
634634
; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -418,20 +418,20 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
418418
; CHECK-NEXT: li a3, 32
419419
; CHECK-NEXT: .LBB32_2:
420420
; CHECK-NEXT: addi a4, a3, -16
421-
; CHECK-NEXT: sltu a5, a3, a4
422-
; CHECK-NEXT: addi a5, a5, -1
423-
; CHECK-NEXT: and a4, a5, a4
424-
; CHECK-NEXT: addi a5, a1, 128
421+
; CHECK-NEXT: sltu a3, a3, a4
422+
; CHECK-NEXT: addi a3, a3, -1
423+
; CHECK-NEXT: and a3, a3, a4
424+
; CHECK-NEXT: addi a4, a1, 128
425425
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
426426
; CHECK-NEXT: vslidedown.vi v0, v8, 2
427-
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
428-
; CHECK-NEXT: vle64.v v16, (a5), v0.t
429-
; CHECK-NEXT: addi a4, a2, -32
430-
; CHECK-NEXT: sltu a2, a2, a4
431-
; CHECK-NEXT: addi a2, a2, -1
432-
; CHECK-NEXT: and a4, a2, a4
433-
; CHECK-NEXT: li a2, 16
434-
; CHECK-NEXT: bltu a4, a2, .LBB32_4
427+
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
428+
; CHECK-NEXT: vle64.v v16, (a4), v0.t
429+
; CHECK-NEXT: addi a3, a2, -32
430+
; CHECK-NEXT: sltu a4, a2, a3
431+
; CHECK-NEXT: addi a4, a4, -1
432+
; CHECK-NEXT: and a4, a4, a3
433+
; CHECK-NEXT: li a3, 16
434+
; CHECK-NEXT: bltu a4, a3, .LBB32_4
435435
; CHECK-NEXT: # %bb.3:
436436
; CHECK-NEXT: li a4, 16
437437
; CHECK-NEXT: .LBB32_4:
@@ -440,11 +440,11 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
440440
; CHECK-NEXT: vslidedown.vi v0, v8, 4
441441
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
442442
; CHECK-NEXT: vle64.v v24, (a5), v0.t
443-
; CHECK-NEXT: bltu a3, a2, .LBB32_6
443+
; CHECK-NEXT: bltu a2, a3, .LBB32_6
444444
; CHECK-NEXT: # %bb.5:
445-
; CHECK-NEXT: li a3, 16
445+
; CHECK-NEXT: li a2, 16
446446
; CHECK-NEXT: .LBB32_6:
447-
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
447+
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
448448
; CHECK-NEXT: vmv1r.v v0, v8
449449
; CHECK-NEXT: vle64.v v8, (a1), v0.t
450450
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma

llvm/test/CodeGen/X86/combine-smin.ll

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -70,9 +70,6 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
7070
; SSE2-LABEL: test_v16i8_reassociation:
7171
; SSE2: # %bb.0:
7272
; SSE2-NEXT: pxor %xmm1, %xmm1
73-
; SSE2-NEXT: pxor %xmm2, %xmm2
74-
; SSE2-NEXT: pcmpgtb %xmm0, %xmm2
75-
; SSE2-NEXT: pand %xmm2, %xmm0
7673
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
7774
; SSE2-NEXT: pand %xmm1, %xmm0
7875
; SSE2-NEXT: retq
@@ -81,21 +78,18 @@ define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
8178
; SSE41: # %bb.0:
8279
; SSE41-NEXT: pxor %xmm1, %xmm1
8380
; SSE41-NEXT: pminsb %xmm1, %xmm0
84-
; SSE41-NEXT: pminsb %xmm1, %xmm0
8581
; SSE41-NEXT: retq
8682
;
8783
; SSE42-LABEL: test_v16i8_reassociation:
8884
; SSE42: # %bb.0:
8985
; SSE42-NEXT: pxor %xmm1, %xmm1
9086
; SSE42-NEXT: pminsb %xmm1, %xmm0
91-
; SSE42-NEXT: pminsb %xmm1, %xmm0
9287
; SSE42-NEXT: retq
9388
;
9489
; AVX-LABEL: test_v16i8_reassociation:
9590
; AVX: # %bb.0:
9691
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
9792
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
98-
; AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
9993
; AVX-NEXT: retq
10094
%1 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer)
10195
%2 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> %1, <16 x i8> zeroinitializer)

llvm/test/CodeGen/X86/combine-umax.ll

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,16 +45,12 @@ define <8 x i16> @test_v8i16_nosignbit(<8 x i16> %a, <8 x i16> %b) {
4545
define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
4646
; SSE-LABEL: test_v16i8_reassociation:
4747
; SSE: # %bb.0:
48-
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
49-
; SSE-NEXT: pmaxub %xmm1, %xmm0
50-
; SSE-NEXT: pmaxub %xmm1, %xmm0
48+
; SSE-NEXT: pmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
5149
; SSE-NEXT: retq
5250
;
5351
; AVX-LABEL: test_v16i8_reassociation:
5452
; AVX: # %bb.0:
55-
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
56-
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
57-
; AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
53+
; AVX-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
5854
; AVX-NEXT: retq
5955
%1 = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
6056
%2 = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)

llvm/test/CodeGen/X86/combine-umin.ll

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -62,16 +62,12 @@ define <8 x i16> @test_v8i16_nosignbit(<8 x i16> %a, <8 x i16> %b) {
6262
define <16 x i8> @test_v16i8_reassociation(<16 x i8> %a) {
6363
; SSE-LABEL: test_v16i8_reassociation:
6464
; SSE: # %bb.0:
65-
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
66-
; SSE-NEXT: pminub %xmm1, %xmm0
67-
; SSE-NEXT: pminub %xmm1, %xmm0
65+
; SSE-NEXT: pminub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
6866
; SSE-NEXT: retq
6967
;
7068
; AVX-LABEL: test_v16i8_reassociation:
7169
; AVX: # %bb.0:
72-
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
73-
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
74-
; AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
70+
; AVX-NEXT: vpminub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
7571
; AVX-NEXT: retq
7672
%1 = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %a, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)
7773
%2 = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>)

0 commit comments

Comments (0)