Skip to content

Commit e7e491f

Browse files
authored
[SelectionDAG] Add ISD::VSELECT to SelectionDAG::canCreateUndefOrPoison. (#143760)
1 parent ad2a2b8 commit e7e491f

File tree

5 files changed

+46
-46
lines changed

5 files changed

+46
-46
lines changed

llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -5553,6 +5553,7 @@ bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
55535553
case ISD::BUILD_VECTOR:
55545554
case ISD::BUILD_PAIR:
55555555
case ISD::SPLAT_VECTOR:
5556+
case ISD::VSELECT:
55565557
return false;
55575558

55585559
case ISD::SELECT_CC:

llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll

Lines changed: 34 additions & 35 deletions
Original file line number · Diff line number · Diff line change
@@ -313,12 +313,12 @@ define i32 @test_nxv128i1(<vscale x 128 x i1> %x) {
313313
; CHECK-NEXT: vslidedown.vx v0, v6, a0
314314
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
315315
; CHECK-NEXT: vslidedown.vx v6, v7, a1
316+
; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
317+
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
316318
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
319+
; CHECK-NEXT: vslidedown.vx v0, v7, a0
317320
; CHECK-NEXT: vslidedown.vx v5, v6, a0
318-
; CHECK-NEXT: vslidedown.vx v4, v7, a0
319321
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
320-
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
321-
; CHECK-NEXT: vmv1r.v v0, v4
322322
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
323323
; CHECK-NEXT: vmv1r.v v0, v5
324324
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
@@ -364,9 +364,9 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
364364
; CHECK-NEXT: vmv1r.v v7, v9
365365
; CHECK-NEXT: vmv1r.v v5, v8
366366
; CHECK-NEXT: vmv1r.v v4, v0
367-
; CHECK-NEXT: vmv.v.i v16, 0
367+
; CHECK-NEXT: vmv.v.i v24, 0
368368
; CHECK-NEXT: csrr a1, vlenb
369-
; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
369+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
370370
; CHECK-NEXT: csrr a0, vlenb
371371
; CHECK-NEXT: slli a0, a0, 3
372372
; CHECK-NEXT: mv a2, a0
@@ -376,7 +376,7 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
376376
; CHECK-NEXT: addi a0, a0, 16
377377
; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
378378
; CHECK-NEXT: vmv1r.v v0, v5
379-
; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
379+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
380380
; CHECK-NEXT: csrr a0, vlenb
381381
; CHECK-NEXT: slli a0, a0, 5
382382
; CHECK-NEXT: add a0, sp, a0
@@ -388,52 +388,52 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
388388
; CHECK-NEXT: vslidedown.vx v3, v4, a0
389389
; CHECK-NEXT: vslidedown.vx v2, v5, a0
390390
; CHECK-NEXT: vmv.v.v v0, v3
391-
; CHECK-NEXT: vmv8r.v v8, v16
392391
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
393-
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
392+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
394393
; CHECK-NEXT: csrr a2, vlenb
395394
; CHECK-NEXT: slli a2, a2, 3
396395
; CHECK-NEXT: mv a3, a2
397396
; CHECK-NEXT: slli a2, a2, 1
398397
; CHECK-NEXT: add a2, a2, a3
399398
; CHECK-NEXT: add a2, sp, a2
400399
; CHECK-NEXT: addi a2, a2, 16
401-
; CHECK-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
400+
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
402401
; CHECK-NEXT: vmv1r.v v0, v2
403-
; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
402+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
404403
; CHECK-NEXT: csrr a2, vlenb
405404
; CHECK-NEXT: slli a2, a2, 4
406405
; CHECK-NEXT: add a2, sp, a2
407406
; CHECK-NEXT: addi a2, a2, 16
408-
; CHECK-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
407+
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
409408
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
410409
; CHECK-NEXT: vslidedown.vx v0, v3, a1
411410
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
412-
; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
411+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
413412
; CHECK-NEXT: csrr a2, vlenb
414413
; CHECK-NEXT: slli a2, a2, 3
415414
; CHECK-NEXT: add a2, sp, a2
416415
; CHECK-NEXT: addi a2, a2, 16
417-
; CHECK-NEXT: vs8r.v v16, (a2) # vscale x 64-byte Folded Spill
416+
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
418417
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
419418
; CHECK-NEXT: vslidedown.vx v0, v2, a1
420419
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
421-
; CHECK-NEXT: vmerge.vim v24, v8, 1, v0
420+
; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
422421
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
423422
; CHECK-NEXT: vslidedown.vx v0, v4, a1
424423
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
425-
; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
424+
; CHECK-NEXT: vmerge.vim v8, v24, 1, v0
426425
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
427426
; CHECK-NEXT: vslidedown.vx v0, v5, a1
427+
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma
428+
; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
429+
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
430+
; CHECK-NEXT: vslidedown.vx v0, v6, a1
428431
; CHECK-NEXT: vslidedown.vx v5, v7, a1
429-
; CHECK-NEXT: vslidedown.vx v4, v6, a1
430432
; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu
431-
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
432-
; CHECK-NEXT: vmv1r.v v0, v4
433-
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
433+
; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
434434
; CHECK-NEXT: vmv1r.v v0, v5
435-
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
436-
; CHECK-NEXT: vadd.vv v8, v16, v8
435+
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
436+
; CHECK-NEXT: vadd.vv v8, v8, v24
437437
; CHECK-NEXT: addi a2, sp, 16
438438
; CHECK-NEXT: vs8r.v v8, (a2) # vscale x 64-byte Folded Spill
439439
; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
@@ -443,15 +443,15 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
443443
; CHECK-NEXT: vslidedown.vx v0, v4, a1
444444
; CHECK-NEXT: vslidedown.vx v3, v5, a1
445445
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
446-
; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
446+
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
447447
; CHECK-NEXT: vmv1r.v v0, v3
448448
; CHECK-NEXT: csrr a0, vlenb
449449
; CHECK-NEXT: slli a0, a0, 3
450450
; CHECK-NEXT: add a0, sp, a0
451451
; CHECK-NEXT: addi a0, a0, 16
452452
; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
453453
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
454-
; CHECK-NEXT: vadd.vv v8, v8, v24
454+
; CHECK-NEXT: vadd.vv v8, v8, v16
455455
; CHECK-NEXT: csrr a0, vlenb
456456
; CHECK-NEXT: slli a0, a0, 3
457457
; CHECK-NEXT: add a0, sp, a0
@@ -492,16 +492,16 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
492492
; CHECK-NEXT: addi a0, a0, 16
493493
; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
494494
; CHECK-NEXT: vadd.vi v24, v24, 1, v0.t
495-
; CHECK-NEXT: vadd.vv v24, v24, v8
495+
; CHECK-NEXT: vadd.vv v0, v24, v8
496496
; CHECK-NEXT: addi a0, sp, 16
497497
; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
498498
; CHECK-NEXT: csrr a0, vlenb
499499
; CHECK-NEXT: slli a0, a0, 3
500500
; CHECK-NEXT: add a0, sp, a0
501501
; CHECK-NEXT: addi a0, a0, 16
502-
; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
503-
; CHECK-NEXT: vadd.vv v8, v8, v0
504-
; CHECK-NEXT: vadd.vv v16, v24, v16
502+
; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
503+
; CHECK-NEXT: vadd.vv v8, v8, v24
504+
; CHECK-NEXT: vadd.vv v16, v0, v16
505505
; CHECK-NEXT: vadd.vv v8, v16, v8
506506
; CHECK-NEXT: vmv.s.x v16, zero
507507
; CHECK-NEXT: vredsum.vs v8, v8, v16
@@ -537,18 +537,17 @@ entry:
537537
define i16 @test_narrow_nxv64i1(<vscale x 64 x i1> %x) {
538538
; CHECK-LABEL: test_narrow_nxv64i1:
539539
; CHECK: # %bb.0: # %entry
540+
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
541+
; CHECK-NEXT: vmv.v.i v8, 0
540542
; CHECK-NEXT: csrr a0, vlenb
541-
; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
542-
; CHECK-NEXT: vmv.v.i v16, 0
543+
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
543544
; CHECK-NEXT: srli a0, a0, 1
544545
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
545-
; CHECK-NEXT: vslidedown.vx v8, v0, a0
546+
; CHECK-NEXT: vslidedown.vx v0, v0, a0
546547
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
547-
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
548-
; CHECK-NEXT: vmv1r.v v0, v8
549-
; CHECK-NEXT: vadd.vi v16, v16, 1, v0.t
550-
; CHECK-NEXT: vmv.s.x v8, zero
551-
; CHECK-NEXT: vredsum.vs v8, v16, v8
548+
; CHECK-NEXT: vadd.vi v8, v8, 1, v0.t
549+
; CHECK-NEXT: vmv.s.x v16, zero
550+
; CHECK-NEXT: vredsum.vs v8, v8, v16
552551
; CHECK-NEXT: vmv.x.s a0, v8
553552
; CHECK-NEXT: ret
554553
entry:

llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -260,18 +260,18 @@ define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1
260260
; ZIP-NEXT: vsetvli a0, zero, e8, m8, ta, ma
261261
; ZIP-NEXT: vmv1r.v v9, v0
262262
; ZIP-NEXT: vmv1r.v v0, v8
263-
; ZIP-NEXT: vmv.v.i v16, 0
264-
; ZIP-NEXT: vmerge.vim v24, v16, 1, v0
263+
; ZIP-NEXT: vmv.v.i v24, 0
264+
; ZIP-NEXT: vmerge.vim v16, v24, 1, v0
265265
; ZIP-NEXT: vmv1r.v v0, v9
266-
; ZIP-NEXT: vmerge.vim v8, v16, 1, v0
266+
; ZIP-NEXT: vmerge.vim v8, v24, 1, v0
267267
; ZIP-NEXT: vsetvli a0, zero, e8, m4, ta, ma
268-
; ZIP-NEXT: ri.vzip2b.vv v4, v8, v24
269-
; ZIP-NEXT: ri.vzip2b.vv v20, v12, v28
270-
; ZIP-NEXT: ri.vzip2a.vv v0, v8, v24
271-
; ZIP-NEXT: ri.vzip2a.vv v16, v12, v28
268+
; ZIP-NEXT: ri.vzip2b.vv v4, v8, v16
269+
; ZIP-NEXT: ri.vzip2b.vv v28, v12, v20
270+
; ZIP-NEXT: ri.vzip2a.vv v0, v8, v16
271+
; ZIP-NEXT: ri.vzip2a.vv v24, v12, v20
272272
; ZIP-NEXT: vsetvli a0, zero, e8, m8, ta, ma
273273
; ZIP-NEXT: vmsne.vi v9, v0, 0
274-
; ZIP-NEXT: vmsne.vi v8, v16, 0
274+
; ZIP-NEXT: vmsne.vi v8, v24, 0
275275
; ZIP-NEXT: vmv1r.v v0, v9
276276
; ZIP-NEXT: ret
277277
%res = call <vscale x 128 x i1> @llvm.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)

llvm/test/CodeGen/X86/avx10_2_512bf16-arith.ll

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -94,8 +94,8 @@ define <32 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_512(<32 x bfloat> %src,
9494
;
9595
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_512:
9696
; X86: # %bb.0:
97-
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
9897
; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
98+
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
9999
; X86-NEXT: vsubbf16 %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xc9,0x5c,0xc2]
100100
; X86-NEXT: vsubbf16 (%eax), %zmm1, %zmm1 # encoding: [0x62,0xf5,0x75,0x48,0x5c,0x08]
101101
; X86-NEXT: vsubbf16 %zmm1, %zmm0, %zmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x5c,0xc1]

llvm/test/CodeGen/X86/avx10_2bf16-arith.ll

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -147,8 +147,8 @@ define <16 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_256(<16 x bfloat> %src,
147147
;
148148
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_256:
149149
; X86: # %bb.0:
150-
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
151150
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
151+
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
152152
; X86-NEXT: vsubbf16 %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0xa9,0x5c,0xc2]
153153
; X86-NEXT: vsubbf16 (%eax), %ymm1, %ymm1 # encoding: [0x62,0xf5,0x75,0x28,0x5c,0x08]
154154
; X86-NEXT: vsubbf16 %ymm1, %ymm0, %ymm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x5c,0xc1]
@@ -201,8 +201,8 @@ define <8 x bfloat> @test_int_x86_avx10_maskz_sub_bf16_128(<8 x bfloat> %src, <8
201201
;
202202
; X86-LABEL: test_int_x86_avx10_maskz_sub_bf16_128:
203203
; X86: # %bb.0:
204-
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
205204
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
205+
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
206206
; X86-NEXT: vsubbf16 %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x75,0x89,0x5c,0xc2]
207207
; X86-NEXT: vsubbf16 (%eax), %xmm1, %xmm1 # encoding: [0x62,0xf5,0x75,0x08,0x5c,0x08]
208208
; X86-NEXT: vsubbf16 %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x5c,0xc1]

0 commit comments

Comments (0)