Commit 366e62a

[X86] Combine uitofp <v x i32> to <v x half> (#121809)
Closes #121793
1 parent c1d01b2 commit 366e62a

2 files changed, +223 -0 lines changed

llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp

Lines changed: 25 additions & 0 deletions
@@ -1777,6 +1777,31 @@ void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
   assert((BW == 64 || BW == 32) &&
          "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");
 
+  // If STRICT_FMUL/FMUL is not supported by the target (as for f16), widen
+  // the UINT_TO_FP to a larger float type and round to the smaller type.
+  if ((!IsStrict && !TLI.isOperationLegalOrCustom(ISD::FMUL, DstVT)) ||
+      (IsStrict && !TLI.isOperationLegalOrCustom(ISD::STRICT_FMUL, DstVT))) {
+    EVT FPVT = BW == 32 ? MVT::f32 : MVT::f64;
+    SDValue UIToFP;
+    SDValue Result;
+    SDValue TargetZero = DAG.getIntPtrConstant(0, DL, /*isTarget=*/true);
+    EVT FloatVecVT = SrcVT.changeVectorElementType(FPVT);
+    if (IsStrict) {
+      UIToFP = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {FloatVecVT, MVT::Other},
+                           {Node->getOperand(0), Src});
+      Result = DAG.getNode(ISD::STRICT_FP_ROUND, DL, {DstVT, MVT::Other},
+                           {Node->getOperand(0), UIToFP, TargetZero});
+      Results.push_back(Result);
+      Results.push_back(Result.getValue(1));
+    } else {
+      UIToFP = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVecVT, Src);
+      Result = DAG.getNode(ISD::FP_ROUND, DL, DstVT, UIToFP, TargetZero);
+      Results.push_back(Result);
+    }
+
+    return;
+  }
+
   SDValue HalfWord = DAG.getConstant(BW / 2, DL, SrcVT);
 
   // Constants to clear the upper part of the word.
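For a target where the multiply-based expansion below is unusable (x86 has no f16 FMUL without AVX512FP16), the new branch instead converts to the next-larger float type and rounds down. The double rounding this introduces is innocuous: f32's 24 significand bits meet the 2p + 2 bound for f16's p = 11, so the composed conversion still matches a single correctly rounded u32 -> f16. A minimal scalar model of the emitted node sequence (an illustration, not LLVM code; it assumes a host compiler with `_Float16` support):

```cpp
#include <cstdint>

// Model of the widened UINT_TO_FP + FP_ROUND for BW == 32, DstVT == f16:
// u32 -> f32 is correctly rounded, then f32 -> f16 is correctly rounded;
// since 24 >= 2*11 + 2, the composition equals one correctly rounded step.
static _Float16 uitofp_u32_to_f16(uint32_t X) {
  return static_cast<_Float16>(static_cast<float>(X));
}
```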

llvm/test/CodeGen/X86/uint_to_half.ll

Lines changed: 198 additions & 0 deletions
@@ -0,0 +1,198 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx,+f16c | FileCheck %s -check-prefixes=AVX1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+f16c | FileCheck %s -check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s -check-prefixes=AVX512

define <8 x half> @test_uitofp_v8i32_v8f16(<8 x i32> %a) {
; AVX1-LABEL: test_uitofp_v8i32_v8f16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v8i32_v8f16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_uitofp_v8i32_v8f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %vec = uitofp <8 x i32> %a to <8 x half>
  ret <8 x half> %vec
}
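The AVX2 body above is the standard two-constant u32 -> f32 trick, not anything f16-specific: `1258291200` is `0x4B000000` (the bit pattern of 2^23) and `1392508928` is `0x53000000` (2^39); the `vpblendw`s splice the low and high 16-bit halves of each lane into those patterns' significands, and `5.49764202E+11` is 2^39 + 2^23, the combined bias. A scalar sketch of one lane (illustrative only; the helper name is mine):

```cpp
#include <bit>      // std::bit_cast (C++20)
#include <cstdint>

// One lane of the AVX2 sequence: vpblendw, vpsrld, vsubps, vaddps.
float u32_to_f32_magic(uint32_t x) {
  // Splice the low 16 bits into 0x4B000000 (2^23): exactly 2^23 + lo16,
  // since lo16 fits in the low significand bits.
  uint32_t lo_bits = 0x4B000000u | (x & 0xFFFFu);
  float lo = std::bit_cast<float>(lo_bits);
  // Splice the high 16 bits into 0x53000000 (2^39): exactly 2^39 + hi16*2^16,
  // because one significand ULP at exponent 39 is worth 2^16.
  uint32_t hi_bits = 0x53000000u | (x >> 16);
  float hi = std::bit_cast<float>(hi_bits);
  // Subtract the combined bias 2^39 + 2^23 = 549764202496.0f (exact), then
  // the final add rounds once: the correctly rounded u32 -> f32 result.
  return (hi - 549764202496.0f) + lo;
}
```

The trailing `vcvtps2ph $4` then narrows the f32 lanes to f16; immediate 4 selects rounding under the current MXCSR mode.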

define <8 x half> @test_strict_uitofp_v8i32_v8f16(<8 x i32> %a) {
; AVX1-LABEL: test_strict_uitofp_v8i32_v8f16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_strict_uitofp_v8i32_v8f16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_strict_uitofp_v8i32_v8f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtudq2ps %ymm0, %ymm0
; AVX512-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %vec = tail call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <8 x half> %vec
}
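Without 256-bit integer blends, the AVX1 path uses the other classic expansion: split each lane into 16-bit halves, convert both with the signed `vcvtdq2ps` (exact, since each half is below 2^16), then recombine as hi * 65536 + lo — the `vmulps` constant that shows up as `6.5536E+4` in the v16i32 tests below. A scalar sketch of one lane (again a hypothetical helper, not the committed code):

```cpp
#include <cstdint>

// One lane of the AVX1 sequence: vpsrld, vandps, vcvtdq2ps, vmulps, vaddps.
float u32_to_f32_split(uint32_t x) {
  // Both halves are < 2^16, so the signed int32 -> float converts are exact.
  float hi = static_cast<float>(static_cast<int32_t>(x >> 16));
  float lo = static_cast<float>(static_cast<int32_t>(x & 0xFFFFu));
  // hi * 65536.0f only adjusts the exponent, so it is exact as well; the
  // final add performs the single rounding of the true value of x.
  return hi * 65536.0f + lo;
}
```

Note that the strict variants lower to the same instruction sequences as the non-strict ones; the STRICT_ nodes only thread the chain through legalization.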

define <16 x half> @test_uitofp_v16i32_v16f16(<16 x i32> %a) {
; AVX1-LABEL: test_uitofp_v16i32_v16f16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
; AVX1-NEXT: vbroadcastss {{.*#+}} ymm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsrld $16, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vcvtps2ph $4, %ymm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_uitofp_v16i32_v16f16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
; AVX2-NEXT: vsubps %ymm5, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
; AVX2-NEXT: vsubps %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vcvtps2ph $4, %ymm1, %xmm1
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_uitofp_v16i32_v16f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512-NEXT: vcvtps2ph $4, %zmm0, %ymm0
; AVX512-NEXT: retq
  %vec = uitofp <16 x i32> %a to <16 x half>
  ret <16 x half> %vec
}

define <16 x half> @test_strict_uitofp_v16i32_v16f16(<16 x i32> %a) {
; AVX1-LABEL: test_strict_uitofp_v16i32_v16f16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
; AVX1-NEXT: vbroadcastss {{.*#+}} ymm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsrld $16, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vcvtps2ph $4, %ymm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_strict_uitofp_v16i32_v16f16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
; AVX2-NEXT: vsubps %ymm5, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
; AVX2-NEXT: vsubps %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vcvtps2ph $4, %ymm1, %xmm1
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_strict_uitofp_v16i32_v16f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtudq2ps %zmm0, %zmm0
; AVX512-NEXT: vcvtps2ph $4, %zmm0, %ymm0
; AVX512-NEXT: retq
  %vec = tail call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <16 x half> %vec
}
