Commit 070d1e8

[AMDGPU] Add test for fpext & fptrunc with bf16. (#85909)
Authored-by: Pravin Jagtap <[email protected]>
1 parent 211eebf commit 070d1e8

1 file changed: 344 additions & 0 deletions

@@ -0,0 +1,344 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -march=amdgcn -mcpu=gfx940 < %s | FileCheck --check-prefixes=GCN %s

; TODO: Add global-isel when it can support bf16
define amdgpu_ps float @v_test_cvt_bf16_f32_v(bfloat %v) {
; GCN-LABEL: v_test_cvt_bf16_f32_v:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
; GCN-NEXT:    ; return to shader part epilog
  %cvt = fpext bfloat %v to float
  ret float %cvt
}
define amdgpu_ps float @v_test_cvt_bf16_f32_s(bfloat inreg %v) {
; GCN-LABEL: v_test_cvt_bf16_f32_s:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_lshl_b32 s0, s0, 16
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    ; return to shader part epilog
  %cvt = fpext bfloat %v to float
  ret float %cvt
}
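
Both fpext tests above check the same fact: extending bf16 to f32 is exact, since bf16 is the high half of an f32, so the lowering is a bare 16-bit left shift (v_lshlrev_b32 on the VALU path, s_lshl_b32 on the SALU path). A minimal C sketch of that bit manipulation, with a helper name of my own choosing:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: bf16 -> f32 is exact, so shifting the raw bf16
   bits into the high half gives the f32 bit pattern, matching the
   single shift the checks expect. */
static float bf16_to_f32(uint16_t bits) {
    uint32_t w = (uint32_t)bits << 16;
    float f;
    memcpy(&f, &w, sizeof f); /* reinterpret without aliasing issues */
    return f;
}
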
define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_v(<2 x float> %src) {
; GCN-LABEL: v_test_cvt_v2f32_v2bf16_v:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_bfe_u32 v2, v0, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v2, v2, v0, s0
; GCN-NEXT:    v_or_b32_e32 v3, 0x400000, v0
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
; GCN-NEXT:    v_bfe_u32 v2, v1, 16, 1
; GCN-NEXT:    v_add3_u32 v2, v2, v1, s0
; GCN-NEXT:    v_or_b32_e32 v3, 0x400000, v1
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
; GCN-NEXT:    s_mov_b32 s0, 0x7060302
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
; GCN-NEXT:    v_perm_b32 v0, v1, v0, s0
; GCN-NEXT:    ; return to shader part epilog
  %res = fptrunc <2 x float> %src to <2 x bfloat>
  %cast = bitcast <2 x bfloat> %res to float
  ret float %cast
}
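
Every f32 -> bf16 fptrunc in this file checks the same round-to-nearest-even idiom: v_bfe_u32 extracts bit 16 (the LSB of the would-be bf16 result), v_add3_u32 adds it together with 0x7fff so ties round to even, v_or_b32 prepares a quieted-NaN alternative, and v_cndmask picks between them based on the unordered compare. A hedged C sketch (the helper name is mine) of what those instructions compute per element:

#include <stdint.h>
#include <string.h>

/* Sketch of the RNE f32 -> bf16 truncation the GCN checks encode. */
static uint16_t f32_to_bf16(float f) {
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);
    uint32_t lsb = (bits >> 16) & 1;        /* v_bfe_u32 v, 16, 1        */
    uint32_t rounded = bits + 0x7FFF + lsb; /* v_add3_u32 with s0=0x7fff */
    uint32_t nan = bits | 0x400000;         /* v_or_b32: set quiet bit   */
    int is_nan = (f != f);                  /* v_cmp_u_f32 vcc, v, v     */
    return (uint16_t)((is_nan ? nan : rounded) >> 16); /* keep high half */
}
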
define amdgpu_ps float @v_test_cvt_v2f32_v2bf16_s(<2 x float> inreg %src) {
; GCN-LABEL: v_test_cvt_v2f32_v2bf16_s:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_bfe_u32 s2, s1, 0x10010
; GCN-NEXT:    s_add_i32 s2, s2, s1
; GCN-NEXT:    s_or_b32 s4, s1, 0x400000
; GCN-NEXT:    s_add_i32 s5, s2, 0x7fff
; GCN-NEXT:    v_cmp_u_f32_e64 s[2:3], s1, s1
; GCN-NEXT:    s_and_b64 s[2:3], s[2:3], exec
; GCN-NEXT:    s_cselect_b32 s2, s4, s5
; GCN-NEXT:    s_bfe_u32 s1, s0, 0x10010
; GCN-NEXT:    s_add_i32 s1, s1, s0
; GCN-NEXT:    s_or_b32 s3, s0, 0x400000
; GCN-NEXT:    s_add_i32 s4, s1, 0x7fff
; GCN-NEXT:    v_cmp_u_f32_e64 s[0:1], s0, s0
; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], exec
; GCN-NEXT:    s_cselect_b32 s0, s3, s4
; GCN-NEXT:    s_pack_hh_b32_b16 s0, s0, s2
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    ; return to shader part epilog
  %res = fptrunc <2 x float> %src to <2 x bfloat>
  %cast = bitcast <2 x bfloat> %res to float
  ret float %cast
}
define amdgpu_ps float @v_test_cvt_f32_bf16_v(float %src) {
; GCN-LABEL: v_test_cvt_f32_bf16_v:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_bfe_u32 v1, v0, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v1, v1, v0, s0
; GCN-NEXT:    v_or_b32_e32 v2, 0x400000, v0
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
; GCN-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
; GCN-NEXT:    ; return to shader part epilog
  %trunc = fptrunc float %src to bfloat
  %ext = fpext bfloat %trunc to float
  ret float %ext
}
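
The round-trip test above (fptrunc immediately followed by fpext) shows the two shifts cancelling: the rounded bf16 value is kept in the high half of the register and the final v_and_b32 with 0xffff0000 simply clears the low half. A short C sketch under the same assumptions as the previous one:

#include <stdint.h>
#include <string.h>

/* Sketch: round f32 to bf16 precision in place, no shifts needed. */
static float f32_round_to_bf16(float f) { /* hypothetical name */
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);
    uint32_t r = (f != f) ? (bits | 0x400000)
                          : bits + 0x7FFF + ((bits >> 16) & 1);
    r &= 0xFFFF0000u; /* v_and_b32 0xffff0000: bf16 value in f32 position */
    float out;
    memcpy(&out, &r, sizeof out);
    return out;
}
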
define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
; GCN-LABEL: v_test_cvt_v2f64_v2bf16_v:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
; GCN-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
; GCN-NEXT:    v_and_b32_e32 v7, 1, v6
; GCN-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GCN-NEXT:    v_add_u32_e32 v4, v6, v4
; GCN-NEXT:    s_or_b64 vcc, s[0:1], vcc
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
; GCN-NEXT:    s_brev_b32 s4, 1
; GCN-NEXT:    v_and_or_b32 v5, v1, s4, v4
; GCN-NEXT:    v_bfe_u32 v4, v4, 16, 1
; GCN-NEXT:    s_movk_i32 s5, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v5, s5
; GCN-NEXT:    v_or_b32_e32 v5, 0x400000, v5
; GCN-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
; GCN-NEXT:    v_cvt_f32_f64_e64 v5, |v[2:3]|
; GCN-NEXT:    v_cvt_f64_f32_e32 v[0:1], v5
; GCN-NEXT:    v_and_b32_e32 v6, 1, v5
; GCN-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, v[0:1]
; GCN-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[2:3]|, v[0:1]
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v6
; GCN-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s[2:3]
; GCN-NEXT:    v_add_u32_e32 v0, v5, v0
; GCN-NEXT:    s_or_b64 vcc, s[0:1], vcc
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
; GCN-NEXT:    v_and_or_b32 v1, v3, s4, v0
; GCN-NEXT:    v_bfe_u32 v0, v0, 16, 1
; GCN-NEXT:    v_add3_u32 v0, v0, v1, s5
; GCN-NEXT:    v_or_b32_e32 v1, 0x400000, v1
; GCN-NEXT:    v_cmp_u_f64_e32 vcc, v[2:3], v[2:3]
; GCN-NEXT:    s_mov_b32 s0, 0x7060302
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_perm_b32 v0, v0, v4, s0
; GCN-NEXT:    ; return to shader part epilog
  %res = fptrunc <2 x double> %src to <2 x bfloat>
  %cast = bitcast <2 x bfloat> %res to float
  ret float %cast
}
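
The f64 -> bf16 checks are longer because a direct narrowing would round twice (f64 -> f32 -> bf16). The sequence appears to implement round-to-odd for the first step: narrow |x| to f32, widen it back, and if the result is inexact with an even LSB, nudge the f32 bits by one in the direction of the true value (v_cmp_gt/v_cmp_nlg plus the v_and_b32 of bit 0), then reattach the sign and apply the usual RNE step. A hedged C sketch of the first step, under those assumptions:

#include <stdint.h>
#include <string.h>
#include <math.h>

/* Sketch: f64 -> f32 with round-to-odd, so a following RNE rounding
   to bf16 cannot double-round. Helper name is mine. */
static float f64_to_f32_round_to_odd(double x) {
    float mag = (float)fabs(x);             /* v_cvt_f32_f64 |x| (RNE)  */
    uint32_t bits;
    memcpy(&bits, &mag, sizeof bits);
    double back = (double)mag;              /* v_cvt_f64_f32 round trip */
    if (!isnan(x) && fabs(x) != back && (bits & 1) == 0)
        bits += (fabs(x) > back) ? 1u : (uint32_t)-1; /* make LSB odd */
    bits |= signbit(x) ? 0x80000000u : 0;   /* v_and_or_b32 sign merge  */
    memcpy(&mag, &bits, sizeof mag);
    return mag;
}
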
define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16(float %a, float %b) {
; GCN-LABEL: fptrunc_f32_f32_to_v2bf16:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_bfe_u32 v2, v0, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v2, v2, v0, s0
; GCN-NEXT:    v_or_b32_e32 v3, 0x400000, v0
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
; GCN-NEXT:    v_bfe_u32 v2, v1, 16, 1
; GCN-NEXT:    v_add3_u32 v2, v2, v1, s0
; GCN-NEXT:    v_or_b32_e32 v3, 0x400000, v1
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
; GCN-NEXT:    s_mov_b32 s0, 0x7060302
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
; GCN-NEXT:    v_perm_b32 v0, v1, v0, s0
; GCN-NEXT:    ; return to shader part epilog
entry:
  %a.cvt = fptrunc float %a to bfloat
  %b.cvt = fptrunc float %b to bfloat
  %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
  %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
  %ret = bitcast <2 x bfloat> %v2.2 to float
  ret float %ret
}
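
After rounding, each bf16 element sits in the high half of its 32-bit register, and the final v_perm_b32 with selector 0x7060302 gathers the two high halves into one dword. If I read the byte selector correctly (bytes 0-3 index the second source, 4-7 the first), the pack reduces to plain masking and shifting:

#include <stdint.h>

/* Sketch of the v_perm_b32 v0, v1, v0, 0x7060302 packing: build
   { hi16(b), hi16(a) } from the two rounded dwords. */
static uint32_t pack_bf16_pair(uint32_t a_rounded, uint32_t b_rounded) {
    return (b_rounded & 0xFFFF0000u) | (a_rounded >> 16);
}
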
define amdgpu_ps float @fptrunc_f32_f32_to_v2bf16_mods(float %a, float %b) {
; GCN-LABEL: fptrunc_f32_f32_to_v2bf16_mods:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_xor_b32_e32 v2, 0x80000000, v0
; GCN-NEXT:    v_bfe_u32 v3, v2, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v3, v3, v2, s0
; GCN-NEXT:    v_or_b32_e32 v2, 0x400000, v2
; GCN-NEXT:    v_cmp_u_f32_e64 vcc, -v0, -v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
; GCN-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v1
; GCN-NEXT:    v_bfe_u32 v3, v2, 16, 1
; GCN-NEXT:    v_add3_u32 v3, v3, v2, s0
; GCN-NEXT:    v_or_b32_e32 v2, 0x400000, v2
; GCN-NEXT:    v_cmp_u_f32_e64 vcc, |v1|, |v1|
; GCN-NEXT:    s_mov_b32 s0, 0x7060302
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v2, vcc
; GCN-NEXT:    v_perm_b32 v0, v1, v0, s0
; GCN-NEXT:    ; return to shader part epilog
entry:
  %a.neg = fneg float %a
  %a.cvt = fptrunc float %a.neg to bfloat
  %b.abs = call float @llvm.fabs.f32(float %b)
  %b.cvt = fptrunc float %b.abs to bfloat
  %v2.1 = insertelement <2 x bfloat> undef, bfloat %a.cvt, i32 0
  %v2.2 = insertelement <2 x bfloat> %v2.1, bfloat %b.cvt, i32 1
  %ret = bitcast <2 x bfloat> %v2.2 to float
  ret float %ret
}
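
The _mods variant checks how fneg and fabs are handled on this path: the floating-point compares fold them into VOP3 source modifiers (-v0, |v1|), while the integer rounding arithmetic needs the modified bits materialized, hence the v_xor_b32 with 0x80000000 and v_and_b32 with 0x7fffffff. Both are pure sign-bit operations; a trivial C sketch:

#include <stdint.h>

/* fneg and fabs on raw f32 bits, as the xor/and above compute them. */
static uint32_t f32_bits_neg(uint32_t bits) { return bits ^ 0x80000000u; }
static uint32_t f32_bits_abs(uint32_t bits) { return bits & 0x7FFFFFFFu; }
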
define amdgpu_ps void @fptrunc_f32_to_bf16(float %a, ptr %out) {
; GCN-LABEL: fptrunc_f32_to_bf16:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_mov_b32_e32 v3, v2
; GCN-NEXT:    v_mov_b32_e32 v2, v1
; GCN-NEXT:    v_bfe_u32 v1, v0, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v1, v1, v0, s0
; GCN-NEXT:    v_or_b32_e32 v4, 0x400000, v0
; GCN-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.cvt = fptrunc float %a to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}
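
When the bf16 result is stored rather than returned, the checks expect flat_store_short_d16_hi: since the rounded value already lives in bits [31:16] of the register, the d16_hi form stores that high half directly and no shift is needed. In C terms, the effect is simply:

#include <stdint.h>

/* Sketch of the d16_hi store: write the high 16 bits of the rounded
   dword as the bf16 result. */
static void store_bf16_hi(uint16_t *out, uint32_t rounded_dword) {
    *out = (uint16_t)(rounded_dword >> 16);
}
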
define amdgpu_ps void @fptrunc_f32_to_bf16_abs(float %a, ptr %out) {
; GCN-LABEL: fptrunc_f32_to_bf16_abs:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_mov_b32_e32 v3, v2
; GCN-NEXT:    v_mov_b32_e32 v2, v1
; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v0
; GCN-NEXT:    v_bfe_u32 v4, v1, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v1, s0
; GCN-NEXT:    v_or_b32_e32 v1, 0x400000, v1
; GCN-NEXT:    v_cmp_u_f32_e64 vcc, |v0|, |v0|
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.abs = call float @llvm.fabs.f32(float %a)
  %a.cvt = fptrunc float %a.abs to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}
define amdgpu_ps void @fptrunc_f32_to_bf16_neg(float %a, ptr %out) {
; GCN-LABEL: fptrunc_f32_to_bf16_neg:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_mov_b32_e32 v3, v2
; GCN-NEXT:    v_mov_b32_e32 v2, v1
; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v0
; GCN-NEXT:    v_bfe_u32 v4, v1, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v1, s0
; GCN-NEXT:    v_or_b32_e32 v1, 0x400000, v1
; GCN-NEXT:    v_cmp_u_f32_e64 vcc, -v0, -v0
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.neg = fneg float %a
  %a.cvt = fptrunc float %a.neg to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}
define amdgpu_ps void @fptrunc_f64_to_bf16(double %a, ptr %out) {
; GCN-LABEL: fptrunc_f64_to_bf16:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_cvt_f32_f64_e64 v6, |v[0:1]|
; GCN-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
; GCN-NEXT:    v_and_b32_e32 v7, 1, v6
; GCN-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v7
; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GCN-NEXT:    v_add_u32_e32 v4, v6, v4
; GCN-NEXT:    s_or_b64 vcc, s[0:1], vcc
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
; GCN-NEXT:    s_brev_b32 s0, 1
; GCN-NEXT:    v_and_or_b32 v5, v1, s0, v4
; GCN-NEXT:    v_bfe_u32 v4, v4, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v5, s0
; GCN-NEXT:    v_or_b32_e32 v5, 0x400000, v5
; GCN-NEXT:    v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.cvt = fptrunc double %a to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}
define amdgpu_ps void @fptrunc_f64_to_bf16_neg(double %a, ptr %out) {
; GCN-LABEL: fptrunc_f64_to_bf16_neg:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_cvt_f32_f64_e64 v7, |v[0:1]|
; GCN-NEXT:    v_cvt_f64_f32_e32 v[4:5], v7
; GCN-NEXT:    v_and_b32_e32 v8, 1, v7
; GCN-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v8
; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GCN-NEXT:    v_add_u32_e32 v4, v7, v4
; GCN-NEXT:    s_or_b64 vcc, s[0:1], vcc
; GCN-NEXT:    s_brev_b32 s4, 1
; GCN-NEXT:    v_xor_b32_e32 v6, 0x80000000, v1
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v7, vcc
; GCN-NEXT:    v_and_or_b32 v5, v6, s4, v4
; GCN-NEXT:    v_bfe_u32 v4, v4, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v5, s0
; GCN-NEXT:    v_or_b32_e32 v5, 0x400000, v5
; GCN-NEXT:    v_cmp_u_f64_e64 vcc, -v[0:1], -v[0:1]
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.neg = fneg double %a
  %a.cvt = fptrunc double %a.neg to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}
define amdgpu_ps void @fptrunc_f64_to_bf16_abs(double %a, ptr %out) {
; GCN-LABEL: fptrunc_f64_to_bf16_abs:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    v_cvt_f32_f64_e64 v7, |v[0:1]|
; GCN-NEXT:    v_cvt_f64_f32_e32 v[4:5], v7
; GCN-NEXT:    v_and_b32_e32 v8, 1, v7
; GCN-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_nlg_f64_e64 s[0:1], |v[0:1]|, v[4:5]
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v8
; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, 1, s[2:3]
; GCN-NEXT:    v_add_u32_e32 v4, v7, v4
; GCN-NEXT:    s_or_b64 vcc, s[0:1], vcc
; GCN-NEXT:    v_and_b32_e32 v6, 0x7fffffff, v1
; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v7, vcc
; GCN-NEXT:    s_brev_b32 s0, 1
; GCN-NEXT:    v_and_or_b32 v5, v6, s0, v4
; GCN-NEXT:    v_bfe_u32 v4, v4, 16, 1
; GCN-NEXT:    s_movk_i32 s0, 0x7fff
; GCN-NEXT:    v_add3_u32 v4, v4, v5, s0
; GCN-NEXT:    v_or_b32_e32 v5, 0x400000, v5
; GCN-NEXT:    v_cmp_u_f64_e64 vcc, |v[0:1]|, |v[0:1]|
; GCN-NEXT:    s_nop 1
; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v5, vcc
; GCN-NEXT:    flat_store_short_d16_hi v[2:3], v0 sc0 sc1
; GCN-NEXT:    s_endpgm
entry:
  %a.abs = call double @llvm.fabs.f64(double %a)
  %a.cvt = fptrunc double %a.abs to bfloat
  store bfloat %a.cvt, ptr %out
  ret void
}

declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
