Commit 7a76038
CodeGen/RISCV: increase test coverage of lrint, llrint (#70826)
To follow up on 98c90a1 (ISel: introduce vector ISD::LRINT, ISD::LLRINT; custom RISCV lowering), increase the test coverage to exercise the codegen of the i32 variant of lrint on RV64, and of llrint on RV32.
1 parent 0e8cbb6 · commit 7a76038
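For context on the lrint test touched below: lrint-sdnode.ll is written against a placeholder integer type, iXLen, and each RUN line pipes the file through sed to pin the placeholder to a concrete width before llc sees it, so one test body covers both integer sizes and each instantiation is checked under its own FileCheck prefix. A minimal sketch of the idiom, abbreviated rather than copied verbatim from the test:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d | FileCheck %s --check-prefix=RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d | FileCheck %s --check-prefix=RV64
declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float>)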

2 files changed: +101, -45 lines

llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll

Lines changed: 2 additions & 0 deletions
@@ -1,4 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
 ; RUN: -verify-machineinstrs < %s | FileCheck %s
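As the NOTE line above records, the CHECK assertions in both files are autogenerated; after changing the RUN lines, they are refreshed with the UpdateTestChecks script rather than edited by hand. A typical invocation, run from an LLVM checkout with a built llc on PATH (exact options may vary by setup):

llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll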

llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll

Lines changed: 99 additions & 45 deletions
@@ -1,8 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
 ; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
-; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64

 define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
 ; RV32-LABEL: lrint_nxv1f32:
@@ -11,12 +13,18 @@ define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
 ; RV32-NEXT: vfcvt.x.f.v v8, v8
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv1f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v9, v8
-; RV64-NEXT: vmv1r.v v8, v9
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv1f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v9, v8
+; RV64-i64-NEXT: vmv1r.v v8, v9
+; RV64-i64-NEXT: ret
 %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)
 ret <vscale x 1 x iXLen> %a
 }
@@ -29,12 +37,18 @@ define <vscale x 2 x iXLen> @lrint_nxv2f32(<vscale x 2 x float> %x) {
 ; RV32-NEXT: vfcvt.x.f.v v8, v8
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv2f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v10, v8
-; RV64-NEXT: vmv2r.v v8, v10
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv2f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v10, v8
+; RV64-i64-NEXT: vmv2r.v v8, v10
+; RV64-i64-NEXT: ret
 %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float> %x)
 ret <vscale x 2 x iXLen> %a
 }
@@ -47,12 +61,18 @@ define <vscale x 4 x iXLen> @lrint_nxv4f32(<vscale x 4 x float> %x) {
 ; RV32-NEXT: vfcvt.x.f.v v8, v8
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v12, v8
-; RV64-NEXT: vmv4r.v v8, v12
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv4f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v12, v8
+; RV64-i64-NEXT: vmv4r.v v8, v12
+; RV64-i64-NEXT: ret
 %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float> %x)
 ret <vscale x 4 x iXLen> %a
 }
@@ -65,12 +85,18 @@ define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x) {
 ; RV32-NEXT: vfcvt.x.f.v v8, v8
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv8f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v16, v8
-; RV64-NEXT: vmv8r.v v8, v16
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv8f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v8
+; RV64-i64-NEXT: vmv8r.v v8, v16
+; RV64-i64-NEXT: ret
 %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float> %x)
 ret <vscale x 8 x iXLen> %a
 }
@@ -90,11 +116,18 @@ define <vscale x 1 x iXLen> @lrint_nxv1f64(<vscale x 1 x double> %x) {
 ; RV32-NEXT: vmv1r.v v8, v9
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv1f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv1f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v9, v8
+; RV64-i32-NEXT: vmv1r.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
 %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double> %x)
 ret <vscale x 1 x iXLen> %a
 }
@@ -108,11 +141,18 @@ define <vscale x 2 x iXLen> @lrint_nxv2f64(<vscale x 2 x double> %x) {
 ; RV32-NEXT: vmv.v.v v8, v10
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv2f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv2f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v10, v8
+; RV64-i32-NEXT: vmv.v.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
 %a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double> %x)
 ret <vscale x 2 x iXLen> %a
 }
@@ -126,11 +166,18 @@ define <vscale x 4 x iXLen> @lrint_nxv4f64(<vscale x 4 x double> %x) {
 ; RV32-NEXT: vmv.v.v v8, v12
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv4f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv4f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v12, v8
+; RV64-i32-NEXT: vmv.v.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
 %a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double> %x)
 ret <vscale x 4 x iXLen> %a
 }
@@ -144,11 +191,18 @@ define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
 ; RV32-NEXT: vmv.v.v v8, v16
 ; RV32-NEXT: ret
 ;
-; RV64-LABEL: lrint_nxv8f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv8f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v16, v8
+; RV64-i32-NEXT: vmv.v.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
 %a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double> %x)
 ret <vscale x 8 x iXLen> %a
 }
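Taken together, the new prefixes pin down the three RVV conversion shapes the lowering picks based on how the integer width compares to the float width; the trailing vmv1r.v/vmv.v.v/vmv*r.v moves merely return the result in v8. A gloss of the conversion instructions checked above (operands elided; comments are explanatory, not part of the test):

vfcvt.x.f.v   # same-width: f32->i32 or f64->i64, converts in place within one register group
vfwcvt.x.f.v  # widening: f32->i64, writes a destination group twice the LMUL of the source
vfncvt.x.f.w  # narrowing: f64->i32, reads the wide source and writes half-width elements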
