Commit 768b0b4

[RISCV] Add test cases for RV64 i128<->half/float/double (llvm#115124)
These emit 'ti' libcalls.
1 parent 381156c commit 768b0b4
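
For reference, a minimal C sketch of the compiler-rt 'ti' libcall signatures the new tests check for. The four builtin names appear verbatim in the CHECK lines below; the wrapper function and its name are illustrative additions, not part of this commit:

/* compiler-rt 'ti' conversion helpers exercised by these tests
   ('ti'/'tu' are __int128/unsigned __int128 on RV64): */
__int128 __fixdfti(double a);              /* fptosi: f64 -> i128 */
unsigned __int128 __fixunsdfti(double a);  /* fptoui: f64 -> u128 */
double __floattidf(__int128 a);            /* sitofp: i128 -> f64 */
double __floatuntidf(unsigned __int128 a); /* uitofp: u128 -> f64 */

/* Illustrative only: RV64 has no native f64<->128-bit conversion
   instructions, so a plain cast lowers to one of the calls above. */
__int128 to_i128(double x) { return (__int128)x; } /* -> __fixdfti */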

File tree: 6 files changed, +1266 -0 lines changed
Lines changed: 63 additions & 0 deletions (new test file: the strict-FP variants, via constrained intrinsics)

@@ -0,0 +1,63 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -target-abi=lp64 \
; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs -target-abi=lp64d \
; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64ID
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs -target-abi=lp64 \
; RUN:   -disable-strictnode-mutation < %s | FileCheck %s -check-prefixes=CHECK,RV64IDINX

define i128 @fptosi_f64_to_i128(double %a) nounwind strictfp {
; CHECK-LABEL: fptosi_f64_to_i128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __fixdfti
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %a, metadata !"fpexcept.strict")
  ret i128 %1
}

define i128 @fptoui_f64_to_i128(double %a) nounwind strictfp {
; CHECK-LABEL: fptoui_f64_to_i128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __fixunsdfti
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %a, metadata !"fpexcept.strict")
  ret i128 %1
}

define double @sitofp_i128_to_f64(i128 %a) nounwind strictfp {
; CHECK-LABEL: sitofp_i128_to_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __floattidf
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}

define double @uitofp_i128_to_f64(i128 %a) nounwind strictfp {
; CHECK-LABEL: uitofp_i128_to_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __floatuntidf
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV64I: {{.*}}
; RV64ID: {{.*}}
; RV64IDINX: {{.*}}
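
For context, a hedged C-level analogue of the strict-FP functions above: under Clang's -ffp-exception-behavior=strict (the mode the constrained intrinsics model; -disable-strictnode-mutation in the RUN lines keeps the nodes strict through SelectionDAG), the cast still lowers to the same libcall, but it may not be constant-folded or speculated past points that observe FP exception state. A sketch, with the function name as an illustrative assumption:

/* Build with: clang --target=riscv64 -ffp-exception-behavior=strict
   The front end emits llvm.experimental.constrained.fptosi.i128.f64,
   which the RISC-V backend lowers to the same __fixdfti call as the
   default-environment test. strict_to_i128 is an illustrative name. */
__int128 strict_to_i128(double x) { return (__int128)x; }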
Lines changed: 286 additions & 0 deletions (new test file: default-environment conversions plus the saturating fptosi/fptoui intrinsics)

@@ -0,0 +1,286 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64 | FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck %s -check-prefixes=CHECK,RV64ID
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64 | FileCheck %s -check-prefixes=CHECK,RV64IDINX

define i128 @fptosi_f64_to_i128(double %a) nounwind {
; CHECK-LABEL: fptosi_f64_to_i128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __fixdfti
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = fptosi double %a to i128
  ret i128 %1
}

define i128 @fptoui_f64_to_i128(double %a) nounwind {
; CHECK-LABEL: fptoui_f64_to_i128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __fixunsdfti
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = fptoui double %a to i128
  ret i128 %1
}

define double @sitofp_i128_to_f64(i128 %a) nounwind {
; CHECK-LABEL: sitofp_i128_to_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __floattidf
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = sitofp i128 %a to double
  ret double %1
}

define double @uitofp_i128_to_f64(i128 %a) nounwind {
; CHECK-LABEL: uitofp_i128_to_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    call __floatuntidf
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %1 = uitofp i128 %a to double
  ret double %1
}

define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
; RV64I-LABEL: fptosi_sat_f64_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -64
; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s4, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s5, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, -449
; RV64I-NEXT:    slli a1, a1, 53
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixdfti
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv s3, a1
; RV64I-NEXT:    li s5, -1
; RV64I-NEXT:    bgez s1, .LBB4_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    slli s3, s5, 63
; RV64I-NEXT:  .LBB4_2:
; RV64I-NEXT:    li a0, 575
; RV64I-NEXT:    slli a0, a0, 53
; RV64I-NEXT:    addi a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    mv s4, a0
; RV64I-NEXT:    blez a0, .LBB4_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    srli s3, s5, 1
; RV64I-NEXT:  .LBB4_4:
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    addi a0, a0, -1
; RV64I-NEXT:    and a1, a0, s3
; RV64I-NEXT:    slti a2, s1, 0
; RV64I-NEXT:    addi a2, a2, -1
; RV64I-NEXT:    and a2, a2, s2
; RV64I-NEXT:    sgtz a3, s4
; RV64I-NEXT:    neg a3, a3
; RV64I-NEXT:    or a2, a3, a2
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s4, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s5, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 64
; RV64I-NEXT:    ret
;
; RV64ID-LABEL: fptosi_sat_f64_to_i128:
; RV64ID:       # %bb.0:
; RV64ID-NEXT:    addi sp, sp, -32
; RV64ID-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    lui a0, %hi(.LCPI4_0)
; RV64ID-NEXT:    fld fa5, %lo(.LCPI4_0)(a0)
; RV64ID-NEXT:    fmv.d fs0, fa0
; RV64ID-NEXT:    fle.d s0, fa5, fa0
; RV64ID-NEXT:    call __fixdfti
; RV64ID-NEXT:    li a2, -1
; RV64ID-NEXT:    bnez s0, .LBB4_2
; RV64ID-NEXT:  # %bb.1:
; RV64ID-NEXT:    slli a1, a2, 63
; RV64ID-NEXT:  .LBB4_2:
; RV64ID-NEXT:    lui a3, %hi(.LCPI4_1)
; RV64ID-NEXT:    fld fa5, %lo(.LCPI4_1)(a3)
; RV64ID-NEXT:    flt.d a3, fa5, fs0
; RV64ID-NEXT:    beqz a3, .LBB4_4
; RV64ID-NEXT:  # %bb.3:
; RV64ID-NEXT:    srli a1, a2, 1
; RV64ID-NEXT:  .LBB4_4:
; RV64ID-NEXT:    feq.d a2, fs0, fs0
; RV64ID-NEXT:    neg a2, a2
; RV64ID-NEXT:    and a1, a2, a1
; RV64ID-NEXT:    neg a3, a3
; RV64ID-NEXT:    neg a4, s0
; RV64ID-NEXT:    and a0, a4, a0
; RV64ID-NEXT:    or a0, a3, a0
; RV64ID-NEXT:    and a0, a2, a0
; RV64ID-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    addi sp, sp, 32
; RV64ID-NEXT:    ret
;
; RV64IDINX-LABEL: fptosi_sat_f64_to_i128:
; RV64IDINX:       # %bb.0:
; RV64IDINX-NEXT:    addi sp, sp, -32
; RV64IDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    mv s0, a0
; RV64IDINX-NEXT:    li a0, -449
; RV64IDINX-NEXT:    slli a0, a0, 53
; RV64IDINX-NEXT:    fle.d s1, a0, s0
; RV64IDINX-NEXT:    mv a0, s0
; RV64IDINX-NEXT:    call __fixdfti
; RV64IDINX-NEXT:    li a2, -1
; RV64IDINX-NEXT:    bnez s1, .LBB4_2
; RV64IDINX-NEXT:  # %bb.1:
; RV64IDINX-NEXT:    slli a1, a2, 63
; RV64IDINX-NEXT:  .LBB4_2:
; RV64IDINX-NEXT:    lui a3, %hi(.LCPI4_0)
; RV64IDINX-NEXT:    ld a3, %lo(.LCPI4_0)(a3)
; RV64IDINX-NEXT:    flt.d a3, a3, s0
; RV64IDINX-NEXT:    beqz a3, .LBB4_4
; RV64IDINX-NEXT:  # %bb.3:
; RV64IDINX-NEXT:    srli a1, a2, 1
; RV64IDINX-NEXT:  .LBB4_4:
; RV64IDINX-NEXT:    feq.d a2, s0, s0
; RV64IDINX-NEXT:    neg a2, a2
; RV64IDINX-NEXT:    and a1, a2, a1
; RV64IDINX-NEXT:    neg a3, a3
; RV64IDINX-NEXT:    neg a4, s1
; RV64IDINX-NEXT:    and a0, a4, a0
; RV64IDINX-NEXT:    or a0, a3, a0
; RV64IDINX-NEXT:    and a0, a2, a0
; RV64IDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    addi sp, sp, 32
; RV64IDINX-NEXT:    ret
  %1 = tail call i128 @llvm.fptosi.sat.i128.f64(double %a)
  ret i128 %1
}
declare i128 @llvm.fptosi.sat.i128.f64(double)

define i128 @fptoui_sat_f64_to_i128(double %a) nounwind {
; RV64I-LABEL: fptoui_sat_f64_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    addi s2, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunsdfti
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    and s3, s2, a0
; RV64I-NEXT:    li a0, 1151
; RV64I-NEXT:    slli a0, a0, 52
; RV64I-NEXT:    addi a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    neg a1, a0
; RV64I-NEXT:    or a0, a1, s3
; RV64I-NEXT:    and a2, s2, s1
; RV64I-NEXT:    or a1, a1, a2
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
;
; RV64ID-LABEL: fptoui_sat_f64_to_i128:
; RV64ID:       # %bb.0:
; RV64ID-NEXT:    addi sp, sp, -32
; RV64ID-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT:    fmv.d fs0, fa0
; RV64ID-NEXT:    fmv.d.x fa5, zero
; RV64ID-NEXT:    fle.d a0, fa5, fa0
; RV64ID-NEXT:    neg s0, a0
; RV64ID-NEXT:    call __fixunsdfti
; RV64ID-NEXT:    lui a2, %hi(.LCPI5_0)
; RV64ID-NEXT:    fld fa5, %lo(.LCPI5_0)(a2)
; RV64ID-NEXT:    and a0, s0, a0
; RV64ID-NEXT:    flt.d a2, fa5, fs0
; RV64ID-NEXT:    neg a2, a2
; RV64ID-NEXT:    or a0, a2, a0
; RV64ID-NEXT:    and a1, s0, a1
; RV64ID-NEXT:    or a1, a2, a1
; RV64ID-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT:    addi sp, sp, 32
; RV64ID-NEXT:    ret
;
; RV64IDINX-LABEL: fptoui_sat_f64_to_i128:
; RV64IDINX:       # %bb.0:
; RV64IDINX-NEXT:    addi sp, sp, -32
; RV64IDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IDINX-NEXT:    mv s0, a0
; RV64IDINX-NEXT:    fle.d a0, zero, a0
; RV64IDINX-NEXT:    neg s1, a0
; RV64IDINX-NEXT:    mv a0, s0
; RV64IDINX-NEXT:    call __fixunsdfti
; RV64IDINX-NEXT:    lui a2, %hi(.LCPI5_0)
; RV64IDINX-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
; RV64IDINX-NEXT:    and a0, s1, a0
; RV64IDINX-NEXT:    flt.d a2, a2, s0
; RV64IDINX-NEXT:    neg a2, a2
; RV64IDINX-NEXT:    or a0, a2, a0
; RV64IDINX-NEXT:    and a1, s1, a1
; RV64IDINX-NEXT:    or a1, a2, a1
; RV64IDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IDINX-NEXT:    addi sp, sp, 32
; RV64IDINX-NEXT:    ret
  %1 = tail call i128 @llvm.fptoui.sat.i128.f64(double %a)
  ret i128 %1
}
declare i128 @llvm.fptoui.sat.i128.f64(double)
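
The two saturating tests above follow the LangRef semantics for llvm.fptosi.sat/llvm.fptoui.sat: NaN maps to zero, and out-of-range inputs clamp to the extremes of the result range. That is what the __gedf2/__gtdf2/__unorddf2 calls (RV64I) and the fle.d/flt.d/feq.d compare-and-select sequences (RV64ID/RV64IDINX) implement around the __fix*ti libcall. A minimal C model of the signed case; sat_i128 is a hypothetical name and the exact bound constants are simplified relative to the .LCPI pool values:

#include <math.h>

/* C model of llvm.fptosi.sat.i128.f64 (per LangRef): NaN -> 0,
   otherwise truncate toward zero and clamp to [INT128_MIN, INT128_MAX].
   sat_i128 is a hypothetical helper, not part of this commit. */
__int128 sat_i128(double x) {
    const unsigned __int128 one = 1;
    const __int128 min = (__int128)(one << 127);        /* -2^127 */
    const __int128 max = (__int128)((one << 127) - 1);  /* 2^127 - 1 */
    if (isnan(x))     return 0;   /* the feq.d / __unorddf2 check */
    if (x < -0x1p127) return min; /* the fle.d / __gedf2 lower bound */
    if (x >= 0x1p127) return max; /* the flt.d / __gtdf2 upper bound */
    return (__int128)x;           /* in range: plain truncation */
}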
