Skip to content

Commit 927da1b

Browse files
committed
[X86][AVX] Fix handling of out-of-bounds shift amounts in AVX2 vector shift nodes #83840
1 parent 6e27dd4 commit 927da1b

File tree

2 files changed

+39
-0
lines changed

2 files changed

+39
-0
lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 10 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -47291,6 +47291,16 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
47291 | 47291
if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47292 | 47292
return V;
47293 | 47293

47294+
APInt ShiftAmt;
47295+
SDNode *UMinNode = N1.getNode();
47296+
if (UMinNode->getOpcode() == ISD::UMIN &&
47297+
ISD::isConstantSplatVector(UMinNode->getOperand(1).getNode(), ShiftAmt) &&
47298+
ShiftAmt == VT.getScalarSizeInBits() - 1) {
47299+
SDValue ShrAmtVal = UMinNode->getOperand(0);
47300+
SDLoc DL(N);
47301+
return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N0, ShrAmtVal);
47302+
}
47303+
47294 | 47304
// fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
47295 | 47305
// into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
47296 | 47306
// into (lshr, (sext (a), SarConst - [56,48,32,24,16]))

llvm/test/CodeGen/X86/combine-sra.ll

Lines changed: 29 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -382,3 +382,32 @@ define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
382 | 382
%2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
383 | 383
ret <4 x i32> %2
384 | 384
}
385+
386+
define <4 x i32> @combine_vec_ashr_out_of_bound(<4 x i32> %x, <4 x i32> %y) {
387+
; SSE-LABEL: combine_vec_ashr_out_of_bound:
388+
; SSE: # %bb.0:
389+
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
390+
; SSE-NEXT: movdqa %xmm0, %xmm3
391+
; SSE-NEXT: psrad %xmm2, %xmm3
392+
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
393+
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
394+
; SSE-NEXT: movdqa %xmm0, %xmm5
395+
; SSE-NEXT: psrad %xmm4, %xmm5
396+
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
397+
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
398+
; SSE-NEXT: movdqa %xmm0, %xmm3
399+
; SSE-NEXT: psrad %xmm1, %xmm3
400+
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
401+
; SSE-NEXT: psrad %xmm1, %xmm0
402+
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
403+
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
404+
; SSE-NEXT: retq
405+
;
406+
; AVX-LABEL: combine_vec_ashr_out_of_bound:
407+
; AVX: # %bb.0:
408+
; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
409+
; AVX-NEXT: retq
410+
%1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %y, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
411+
%2 = ashr <4 x i32> %x, %1
412+
ret <4 x i32> %2
413+
}

0 commit comments

Comments (0)