Commit f48f6a5
[X86][AVX] Fix handling of out-of-bounds shift amounts in AVX2 vector shift nodes #83840
1 parent: 6e27dd4

2 files changed: +44 -0 lines changed

llvm/lib/Target/X86/X86ISelLowering.cpp
Lines changed: 14 additions & 0 deletions

@@ -28927,6 +28927,9 @@ SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
 // supported by the Subtarget
 static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
                                         unsigned Opcode) {
+  assert(Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL &&
+         "Unexpected Opcode!");
+
   if (!VT.isSimple())
     return false;
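Note on the new assertion: since && binds tighter than ||, the condition parses as

    Opcode == ISD::SHL || Opcode == ISD::SRA || (Opcode == ISD::SRL && "Unexpected Opcode!")

The string literal is non-null, so the check still rejects exactly the unexpected opcodes, but compilers commonly flag this pattern under -Wparentheses. A fully parenthesized equivalent, shown only as a sketch and not part of this commit, would be:

    assert((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
           "Unexpected Opcode!");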

@@ -47291,6 +47294,17 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
     return V;
 
+  APInt ShiftAmt;
+  SDNode *UMinNode = N1.getNode();
+  if (supportedVectorVarShift(VT, Subtarget, ISD::SRA) &&
+      UMinNode->getOpcode() == ISD::UMIN &&
+      ISD::isConstantSplatVector(UMinNode->getOperand(1).getNode(), ShiftAmt) &&
+      ShiftAmt == VT.getScalarSizeInBits() - 1) {
+    SDValue ShrAmtVal = UMinNode->getOperand(0);
+    SDLoc DL(N);
+    return DAG.getNode(X86ISD::VSRAV, DL, N->getVTList(), N0, ShrAmtVal);
+  }
+
   // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
   // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
   // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
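The combine added above rewrites sra(x, umin(y, EltBits - 1)) into X86ISD::VSRAV with the unclamped amount y, which is what lets the AVX2 path drop the clamp entirely. A minimal per-lane sketch of why that is sound for i32 elements, assuming VPSRAVD's documented behavior of filling a lane with the sign bit once the count reaches the element width (the helper names below are illustrative, not from this patch):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Model of the IR pattern the combine matches: ashr %x, (umin %y, 31).
    // Clamping to 31 keeps the shift amount in range for i32.
    static int32_t ashr_clamped(int32_t x, uint32_t y) {
      uint32_t amt = y < 31u ? y : 31u;
      return x >> amt; // arithmetic shift for signed x (well defined since C++20)
    }

    // Model of one VPSRAVD lane: counts of 32 or more produce all sign bits,
    // which is the same result as shifting by 31.
    static int32_t vpsravd_lane(int32_t x, uint32_t y) {
      return (y >= 32u) ? (x >> 31) : (x >> y);
    }

    int main() {
      const int32_t samples[] = {0, 1, -1, 12345, -12345,
                                 std::numeric_limits<int32_t>::max(),
                                 std::numeric_limits<int32_t>::min()};
      for (int32_t x : samples)
        for (uint32_t y = 0; y < 64; ++y)
          assert(ashr_clamped(x, y) == vpsravd_lane(x, y));
      return 0;
    }

Since both sides agree for every count, replacing the clamped ISD::SRA with VSRAV on the original amount does not change any lane, which is why the pminud disappears from the AVX output in the test below.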

llvm/test/CodeGen/X86/combine-sra.ll
Lines changed: 30 additions & 0 deletions

@@ -382,3 +382,33 @@ define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
   %2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
   ret <4 x i32> %2
 }
+
+define <4 x i32> @combine_vec_ashr_out_of_bound(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: combine_vec_ashr_out_of_bound:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrad %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm5
+; SSE-NEXT:    psrad %xmm4, %xmm5
+; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    psrad %xmm1, %xmm3
+; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE-NEXT:    psrad %xmm1, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ashr_out_of_bound:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %y, <4 x i32> <i32 31, i32 31, i32 31, i32 31>)
+  %2 = ashr <4 x i32> %x, %1
+  ret <4 x i32> %2
+}
