Skip to content
This repository was archived by the owner on Mar 28, 2020. It is now read-only.

Commit 42694a3

Browse files
author
Zvi Rackover
committed
[X86] Prefer reduced width multiplication over pmulld on Silvermont
Summary: Prefer expansions such as: pmullw,pmulhw,punpcklwd,punpckhwd over pmulld. On Silvermont [source: Intel Optimization Reference Manual]: PMULLD has a throughput of 1/11 [instructions/cycle]. PMULHUW/PMULHW/PMULLW have a throughput of 1/2 [instructions/cycle]. Fixes pr31202. Analysis of this issue was done by Farhana Aleen. Reviewers: wmi, delena, mkuper Subscribers: RKSimon, llvm-commits Differential Revision: https://reviews.llvm.org/D27203 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288844 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 6e9255f commit 42694a3

File tree

5 files changed

+93
-3
lines changed

5 files changed

+93
-3
lines changed

lib/Target/X86/X86.td

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,8 @@ def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
9999
"Bit testing of memory is slow">;
100100
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
101101
"SHLD instruction is slow">;
102+
def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
103+
"PMULLD instruction is slow">;
102104
// FIXME: This should not apply to CPUs that do not have SSE.
103105
def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
104106
"IsUAMem16Slow", "true",
@@ -403,6 +405,7 @@ class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
403405
FeatureSlowLEA,
404406
FeatureSlowIncDec,
405407
FeatureSlowBTMem,
408+
FeatureSlowPMULLD,
406409
FeatureLAHFSAHF
407410
]>;
408411
def : SilvermontProc<"silvermont">;

lib/Target/X86/X86ISelLowering.cpp

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29302,10 +29302,17 @@ static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
2930229302
/// generate pmullw+pmulhuw for it (MULU16 mode).
2930329303
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
2930429304
const X86Subtarget &Subtarget) {
29305-
// pmulld is supported since SSE41. It is better to use pmulld
29306-
// instead of pmullw+pmulhw.
29305+
// Check for legality
2930729306
// pmullw/pmulhw are not supported by SSE.
29308-
if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
29307+
if (!Subtarget.hasSSE2())
29308+
return SDValue();
29309+
29310+
// Check for profitability
29311+
// pmulld is supported since SSE41. It is better to use pmulld
29312+
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
29313+
// the expansion.
29314+
bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
29315+
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
2930929316
return SDValue();
2931029317

2931129318
ShrinkMode Mode;

lib/Target/X86/X86Subtarget.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -228,6 +228,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
228228
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
229229
isTargetKFreeBSD() || In64BitMode)
230230
stackAlignment = 16;
231+
232+
assert((!isPMULLDSlow() || hasSSE41()) &&
233+
"Feature Slow PMULLD can only be set on a subtarget with SSE4.1");
231234
}
232235

233236
void X86Subtarget::initializeEnvironment() {
@@ -275,6 +278,7 @@ void X86Subtarget::initializeEnvironment() {
275278
HasMWAITX = false;
276279
HasMPX = false;
277280
IsBTMemSlow = false;
281+
IsPMULLDSlow = false;
278282
IsSHLDSlow = false;
279283
IsUAMem16Slow = false;
280284
IsUAMem32Slow = false;

lib/Target/X86/X86Subtarget.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,10 @@ class X86Subtarget final : public X86GenSubtargetInfo {
178178
/// True if SHLD instructions are slow.
179179
bool IsSHLDSlow;
180180

181+
/// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
182+
/// PMULUDQ.
183+
bool IsPMULLDSlow;
184+
181185
/// True if unaligned memory accesses of 16-bytes are slow.
182186
bool IsUAMem16Slow;
183187

@@ -452,6 +456,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
452456
bool hasMWAITX() const { return HasMWAITX; }
453457
bool isBTMemSlow() const { return IsBTMemSlow; }
454458
bool isSHLDSlow() const { return IsSHLDSlow; }
459+
bool isPMULLDSlow() const { return IsPMULLDSlow; }
455460
bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
456461
bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
457462
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }

test/CodeGen/X86/slow-pmulld.ll

Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
3+
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
4+
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
5+
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
6+
7+
define <4 x i32> @foo(<4 x i8> %A) {
8+
; CHECK32-LABEL: foo:
9+
; CHECK32: # BB#0:
10+
; CHECK32-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
11+
; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
12+
; CHECK32-NEXT: movdqa %xmm0, %xmm2
13+
; CHECK32-NEXT: pmullw %xmm1, %xmm0
14+
; CHECK32-NEXT: pmulhw %xmm1, %xmm2
15+
; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
16+
; CHECK32-NEXT: retl
17+
;
18+
; CHECK64-LABEL: foo:
19+
; CHECK64: # BB#0:
20+
; CHECK64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
21+
; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
22+
; CHECK64-NEXT: movdqa %xmm0, %xmm2
23+
; CHECK64-NEXT: pmullw %xmm1, %xmm0
24+
; CHECK64-NEXT: pmulhw %xmm1, %xmm2
25+
; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
26+
; CHECK64-NEXT: retq
27+
;
28+
; SSE4-32-LABEL: foo:
29+
; SSE4-32: # BB#0:
30+
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
31+
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
32+
; SSE4-32-NEXT: retl
33+
;
34+
; SSE4-64-LABEL: foo:
35+
; SSE4-64: # BB#0:
36+
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
37+
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
38+
; SSE4-64-NEXT: retq
39+
%z = zext <4 x i8> %A to <4 x i32>
40+
%m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
41+
ret <4 x i32> %m
42+
}
43+
44+
define <4 x i32> @foo_os(<4 x i8> %A) minsize {
45+
; CHECK32-LABEL: foo_os:
46+
; CHECK32: # BB#0:
47+
; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
48+
; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
49+
; CHECK32-NEXT: retl
50+
;
51+
; CHECK64-LABEL: foo_os:
52+
; CHECK64: # BB#0:
53+
; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
54+
; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
55+
; CHECK64-NEXT: retq
56+
;
57+
; SSE4-32-LABEL: foo_os:
58+
; SSE4-32: # BB#0:
59+
; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
60+
; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
61+
; SSE4-32-NEXT: retl
62+
;
63+
; SSE4-64-LABEL: foo_os:
64+
; SSE4-64: # BB#0:
65+
; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
66+
; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
67+
; SSE4-64-NEXT: retq
68+
%z = zext <4 x i8> %A to <4 x i32>
69+
%m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
70+
ret <4 x i32> %m
71+
}

0 commit comments

Comments
 (0)