Skip to content

Commit 72a049d

Browse files
committed
[X86][AVX2] LowerINSERT_VECTOR_ELT - support v4i64 insertion as BLENDI(X, SCALAR_TO_VECTOR(Y))
1 parent 87c770b commit 72a049d

File tree

5 files changed

+31
-20
lines changed

5 files changed

+31
-20
lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19773,7 +19773,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
1977319773
// and incur a domain crossing penalty if that's what we'll end up
1977419774
// doing anyway after extracting to a 128-bit vector.
1977519775
if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
19776-
(Subtarget.hasAVX2() && EltVT == MVT::i32)) {
19776+
(Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
1977719777
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
1977819778
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
1977919779
DAG.getTargetConstant(1, dl, MVT::i8));

llvm/test/CodeGen/X86/avx-insertelt.ll

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -81,8 +81,8 @@ define <4 x i64> @insert_i64_firstelt_of_low_subvector(<4 x i64> %x, i64 %s) {
8181
;
8282
; AVX2-LABEL: insert_i64_firstelt_of_low_subvector:
8383
; AVX2: # %bb.0:
84-
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
85-
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
84+
; AVX2-NEXT: vmovq %rdi, %xmm1
85+
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
8686
; AVX2-NEXT: retq
8787
%i0 = insertelement <4 x i64> %x, i64 %s, i32 0
8888
ret <4 x i64> %i0
@@ -312,11 +312,9 @@ define <4 x i64> @insert_i64_firstelts(<4 x i64> %x, i64 %s) {
312312
;
313313
; AVX2-LABEL: insert_i64_firstelts:
314314
; AVX2: # %bb.0:
315-
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
316-
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
317315
; AVX2-NEXT: vmovq %rdi, %xmm1
318316
; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1
319-
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
317+
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
320318
; AVX2-NEXT: retq
321319
%i0 = insertelement <4 x i64> %x, i64 %s, i32 0
322320
%i1 = insertelement <4 x i64> %i0, i64 %s, i32 2
@@ -532,7 +530,7 @@ define <4 x i64> @insert_i64_two_elts_of_low_subvector(<4 x i64> %x, i64 %s) {
532530
;
533531
; AVX2-LABEL: insert_i64_two_elts_of_low_subvector:
534532
; AVX2: # %bb.0:
535-
; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
533+
; AVX2-NEXT: vmovq %rdi, %xmm1
536534
; AVX2-NEXT: vpinsrq $1, %rdi, %xmm1, %xmm1
537535
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
538536
; AVX2-NEXT: retq

llvm/test/CodeGen/X86/combine-mul.ll

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -477,8 +477,8 @@ define <4 x i64> @fuzz15429(<4 x i64> %InVec) {
477477
; AVX: # %bb.0:
478478
; AVX-NEXT: vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
479479
; AVX-NEXT: movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
480-
; AVX-NEXT: vpinsrq $0, %rax, %xmm0, %xmm1
481-
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
480+
; AVX-NEXT: vmovq %rax, %xmm1
481+
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
482482
; AVX-NEXT: retq
483483
%mul = mul <4 x i64> %InVec, <i64 1, i64 2, i64 4, i64 8>
484484
%I = insertelement <4 x i64> %mul, i64 9223372036854775807, i64 0

llvm/test/CodeGen/X86/splat-for-size.ll

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -385,13 +385,20 @@ define <32 x i8> @splat_v32i8_pgso(<32 x i8> %x) !prof !14 {
385385
@A = common dso_local global <3 x i64> zeroinitializer, align 32
386386

387387
define <8 x i64> @pr23259() #1 {
388-
; CHECK-LABEL: pr23259:
389-
; CHECK: # %bb.0: # %entry
390-
; CHECK-NEXT: vmovaps A+16(%rip), %xmm0
391-
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
392-
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
393-
; CHECK-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
394-
; CHECK-NEXT: retq
388+
; AVX-LABEL: pr23259:
389+
; AVX: # %bb.0: # %entry
390+
; AVX-NEXT: vmovaps A+16(%rip), %xmm0
391+
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
392+
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
393+
; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
394+
; AVX-NEXT: retq
395+
;
396+
; AVX2-LABEL: pr23259:
397+
; AVX2: # %bb.0: # %entry
398+
; AVX2-NEXT: vmovaps A+16(%rip), %xmm0
399+
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3,4,5,6,7]
400+
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
401+
; AVX2-NEXT: retq
395402
entry:
396403
%0 = load <4 x i64>, <4 x i64>* bitcast (<3 x i64>* @A to <4 x i64>*), align 32
397404
%1 = shufflevector <4 x i64> %0, <4 x i64> undef, <3 x i32> <i32 undef, i32 undef, i32 2>

llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -140,10 +140,16 @@ define <4 x double> @demandedelts_vpermil2pd256_as_shufpd(<4 x double> %a0, <4 x
140140
; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,3]
141141
; X86-NEXT: retl
142142
;
143-
; X64-LABEL: demandedelts_vpermil2pd256_as_shufpd:
144-
; X64: # %bb.0:
145-
; X64-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm1[0,0],ymm0[3],ymm1[3]
146-
; X64-NEXT: retq
143+
; X64-AVX-LABEL: demandedelts_vpermil2pd256_as_shufpd:
144+
; X64-AVX: # %bb.0:
145+
; X64-AVX-NEXT: vpermil2pd {{.*#+}} ymm0 = ymm1[0,0],ymm0[3],ymm1[3]
146+
; X64-AVX-NEXT: retq
147+
;
148+
; X64-AVX2-LABEL: demandedelts_vpermil2pd256_as_shufpd:
149+
; X64-AVX2: # %bb.0:
150+
; X64-AVX2-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
151+
; X64-AVX2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,3]
152+
; X64-AVX2-NEXT: retq
147153
%res0 = insertelement <4 x i64> <i64 0, i64 4, i64 2, i64 7>, i64 %a2, i32 0
148154
%res1 = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x i64> %res0, i8 0)
149155
%res2 = shufflevector <4 x double> %res1, <4 x double> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>

0 commit comments

Comments (0)