@@ -441,98 +441,32 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) {
   ret <8 x i16> %1
 }
 
-; TODO: Handle udiv-by-one
 define <8 x i16> @pr38477(<8 x i16> %a0) {
 ; SSE-LABEL: pr38477:
 ; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,4957,57457,4103,16385,35545,2048,2115]
+; SSE-NEXT:    pmulhuw %xmm0, %xmm2
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pxor %xmm0, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
-; SSE-NEXT:    pextrw $1, %xmm1, %eax
-; SSE-NEXT:    imull $4957, %eax, %ecx # imm = 0x135D
-; SSE-NEXT:    shrl $16, %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movzwl %ax, %eax
-; SSE-NEXT:    shrl %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    shrl $6, %eax
-; SSE-NEXT:    pinsrw $1, %eax, %xmm0
-; SSE-NEXT:    pextrw $2, %xmm1, %eax
-; SSE-NEXT:    imull $57457, %eax, %eax # imm = 0xE071
-; SSE-NEXT:    shrl $22, %eax
-; SSE-NEXT:    pinsrw $2, %eax, %xmm0
-; SSE-NEXT:    pextrw $3, %xmm1, %eax
-; SSE-NEXT:    imull $4103, %eax, %eax # imm = 0x1007
-; SSE-NEXT:    shrl $28, %eax
-; SSE-NEXT:    pinsrw $3, %eax, %xmm0
-; SSE-NEXT:    pextrw $4, %xmm1, %eax
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shll $14, %ecx
-; SSE-NEXT:    addl %eax, %ecx
-; SSE-NEXT:    shrl $30, %ecx
-; SSE-NEXT:    pinsrw $4, %ecx, %xmm0
-; SSE-NEXT:    pextrw $5, %xmm1, %eax
-; SSE-NEXT:    imull $35545, %eax, %eax # imm = 0x8AD9
-; SSE-NEXT:    shrl $22, %eax
-; SSE-NEXT:    pinsrw $5, %eax, %xmm0
-; SSE-NEXT:    pextrw $6, %xmm1, %eax
-; SSE-NEXT:    shrl $5, %eax
-; SSE-NEXT:    pinsrw $6, %eax, %xmm0
-; SSE-NEXT:    pextrw $7, %xmm1, %eax
-; SSE-NEXT:    imull $2115, %eax, %ecx # imm = 0x843
-; SSE-NEXT:    shrl $16, %ecx
-; SSE-NEXT:    subl %ecx, %eax
-; SSE-NEXT:    movzwl %ax, %eax
-; SSE-NEXT:    shrl %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    shrl $4, %eax
-; SSE-NEXT:    pinsrw $7, %eax, %xmm0
+; SSE-NEXT:    psubw %xmm2, %xmm1
+; SSE-NEXT:    pmulhuw {{.*}}(%rip), %xmm1
+; SSE-NEXT:    paddw %xmm2, %xmm1
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = <u,1024,1024,16,4,1024,u,4096>
+; SSE-NEXT:    pmulhuw %xmm1, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: pr38477:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; AVX-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX-NEXT:    imull $4957, %eax, %ecx # imm = 0x135D
-; AVX-NEXT:    shrl $16, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    movzwl %ax, %eax
-; AVX-NEXT:    shrl %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    shrl $6, %eax
-; AVX-NEXT:    vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX-NEXT:    imull $57457, %eax, %eax # imm = 0xE071
-; AVX-NEXT:    shrl $22, %eax
-; AVX-NEXT:    vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX-NEXT:    imull $4103, %eax, %eax # imm = 0x1007
-; AVX-NEXT:    shrl $28, %eax
-; AVX-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shll $14, %ecx
-; AVX-NEXT:    addl %eax, %ecx
-; AVX-NEXT:    shrl $30, %ecx
-; AVX-NEXT:    vpinsrw $4, %ecx, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    imull $35545, %eax, %eax # imm = 0x8AD9
-; AVX-NEXT:    shrl $22, %eax
-; AVX-NEXT:    vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $6, %xmm0, %eax
-; AVX-NEXT:    shrl $5, %eax
-; AVX-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX-NEXT:    imull $2115, %eax, %ecx # imm = 0x843
-; AVX-NEXT:    shrl $16, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    movzwl %ax, %eax
-; AVX-NEXT:    shrl %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    shrl $4, %eax
-; AVX-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT:    vpsubw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpmulhuw {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpaddw %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vpmulhuw {{.*}}(%rip), %xmm1, %xmm2
+; AVX-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7]
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX-NEXT:    retq
-  %rem = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
-  ret <8 x i16> %rem
+  %1 = udiv <8 x i16> %a0, <i16 1, i16 119, i16 73, i16 -111, i16 -3, i16 118, i16 32, i16 31>
+  ret <8 x i16> %1
 }
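
Note: both before and after this change the backend lowers these udivs with the Granlund/Montgomery magic-number method (multiply by a precomputed constant, take the high 16 bits, shift); the new codegen simply performs it on all eight lanes at once with pmulhuw instead of scalarizing through pextrw/imull/pinsrw. Below is a minimal C sketch of the lane-1 case (divisor 119); the helper name udiv119 and the exhaustive test driver are illustrative, not part of the commit.

/* Sketch (not from the commit): unsigned divide by 119 via the
 * 16-bit magic constant 4957 (0x135D). The full multiplier
 * 0x1135D = 70493 = ceil(2^23 / 119) needs 17 bits, so the missing
 * top bit is recovered with a subtract/halve/add fixup, matching
 * the old scalar imull/shrl/subl/shrl/addl/shrl sequence. */
#include <assert.h>
#include <stdint.h>

static uint16_t udiv119(uint16_t x) {
    uint16_t hi = (uint32_t)x * 4957u >> 16; /* pmulhuw / imull+shrl $16 */
    uint16_t t = (uint16_t)(x - hi) >> 1;    /* subtract, then halve */
    return (uint16_t)(t + hi) >> 6;          /* add back, post-shift by 6 */
}

int main(void) {
    for (uint32_t x = 0; x <= 0xFFFF; ++x)   /* exhaustive check */
        assert(udiv119((uint16_t)x) == x / 119);
    return 0;
}

The vectorized form runs that same fixup on every lane (pmulhuw, psubw, pmulhuw, paddw), applies the per-lane post-shifts as one more pmulhuw by powers of two (<u,1024,1024,16,4,1024,u,4096>, i.e. a multiply by 2^(16-s)), and uses pblendw to keep the lanes that need no further shift, with the final pblendw passing lane 0 (udiv by 1) through unchanged.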