Commit 3d084e3

[RISCV] Add tests for fixed length concat_vector. NFC
These shufflevector chains will get combined into an n-ary concat_vectors node.
1 parent 954a048 commit 3d084e3
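For context, each test below builds a concatenation out of a tree of full-width shufflevectors. A minimal sketch of that pattern (illustrative only; the function name @concat_sketch is hypothetical and not part of this diff):

define <4 x i32> @concat_sketch(<2 x i32> %lo, <2 x i32> %hi) {
  ; A single full-width shuffle that concatenates its two operands; chains of
  ; these, as in the tests below, are the shape the planned combine would fold
  ; into one n-ary concat_vectors node.
  %v = shufflevector <2 x i32> %lo, <2 x i32> %hi, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %v
}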

1 file changed: +247 -0 lines changed

@@ -0,0 +1,247 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

define <8 x i32> @concat_2xv4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: concat_2xv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: ret
  %ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %ab
}

define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; CHECK-LABEL: concat_4xv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v11
; CHECK-NEXT: vmv1r.v v14, v9
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v14, 2
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 4
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v12, 6
; CHECK-NEXT: ret
  %ab = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %cd = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %abcd = shufflevector <4 x i32> %ab, <4 x i32> %cd, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %abcd
}

define <8 x i32> @concat_8xv1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %c, <1 x i32> %d, <1 x i32> %e, <1 x i32> %f, <1 x i32> %g, <1 x i32> %h) {
; CHECK-LABEL: concat_8xv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v15
; CHECK-NEXT: vmv1r.v v18, v13
; CHECK-NEXT: vmv1r.v v20, v11
; CHECK-NEXT: vmv1r.v v22, v9
; CHECK-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v22, 1
; CHECK-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v20, 3
; CHECK-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v12, 4
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v18, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v14, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 7
; CHECK-NEXT: ret
  %ab = shufflevector <1 x i32> %a, <1 x i32> %b, <2 x i32> <i32 0, i32 1>
  %cd = shufflevector <1 x i32> %c, <1 x i32> %d, <2 x i32> <i32 0, i32 1>
  %abcd = shufflevector <2 x i32> %ab, <2 x i32> %cd, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %ef = shufflevector <1 x i32> %e, <1 x i32> %f, <2 x i32> <i32 0, i32 1>
  %gh = shufflevector <1 x i32> %g, <1 x i32> %h, <2 x i32> <i32 0, i32 1>
  %efgh = shufflevector <2 x i32> %ef, <2 x i32> %gh, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %abcdefgh = shufflevector <4 x i32> %abcd, <4 x i32> %efgh, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %abcdefgh
}

define <16 x i32> @concat_2xv8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: concat_2xv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: ret
  %v = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i32> %v
}

define <16 x i32> @concat_4xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; CHECK-LABEL: concat_4xv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v11
; CHECK-NEXT: vmv1r.v v16, v10
; CHECK-NEXT: vmv1r.v v20, v9
; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v20, 4
; CHECK-NEXT: vsetivli zero, 12, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v16, 8
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v12, 12
; CHECK-NEXT: ret
  %ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %cd = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %abcd = shufflevector <8 x i32> %ab, <8 x i32> %cd, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i32> %abcd
}

define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d, <2 x i32> %e, <2 x i32> %f, <2 x i32> %g, <2 x i32> %h) {
; CHECK-LABEL: concat_8xv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v15
; CHECK-NEXT: vmv1r.v v20, v14
; CHECK-NEXT: vmv1r.v v24, v13
; CHECK-NEXT: vmv1r.v v28, v11
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmv1r.v v4, v9
; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v4, 2
; CHECK-NEXT: vsetivli zero, 6, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v0, 4
; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v28, 6
; CHECK-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v12, 8
; CHECK-NEXT: vsetivli zero, 12, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v24, 10
; CHECK-NEXT: vsetivli zero, 14, e32, m4, tu, ma
; CHECK-NEXT: vslideup.vi v8, v20, 12
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 14
; CHECK-NEXT: ret
  %ab = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %cd = shufflevector <2 x i32> %c, <2 x i32> %d, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %abcd = shufflevector <4 x i32> %ab, <4 x i32> %cd, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %ef = shufflevector <2 x i32> %e, <2 x i32> %f, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %gh = shufflevector <2 x i32> %g, <2 x i32> %h, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %efgh = shufflevector <4 x i32> %ef, <4 x i32> %gh, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %abcdefgh = shufflevector <8 x i32> %abcd, <8 x i32> %efgh, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i32> %abcdefgh
}

define <32 x i32> @concat_2xv16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: concat_2xv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 16
; CHECK-NEXT: ret
  %ab = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i32> %ab
}

define <32 x i32> @concat_4xv8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; CHECK-LABEL: concat_4xv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v14
; CHECK-NEXT: vmv2r.v v24, v12
; CHECK-NEXT: vmv2r.v v0, v10
; CHECK-NEXT: vsetivli zero, 16, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v8, v0, 8
; CHECK-NEXT: vsetivli zero, 24, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v8, v24, 16
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 24
; CHECK-NEXT: ret
  %ab = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %cd = shufflevector <8 x i32> %c, <8 x i32> %d, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %abcd = shufflevector <16 x i32> %ab, <16 x i32> %cd, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i32> %abcd
}

define <32 x i32> @concat_8xv4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %g, <4 x i32> %h) {
; CHECK-LABEL: concat_8xv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: vmv1r.v v16, v15
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v16, v14
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v16, v13
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: vmv1r.v v24, v10
; CHECK-NEXT: vmv1r.v v16, v9
; CHECK-NEXT: vsetivli zero, 8, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v8, v16, 4
; CHECK-NEXT: vsetivli zero, 12, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v8, v24, 8
; CHECK-NEXT: vsetivli zero, 16, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v8, v0, 12
; CHECK-NEXT: vsetivli zero, 20, e32, m8, tu, ma
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v16, 16
; CHECK-NEXT: vsetivli zero, 24, e32, m8, tu, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v16, 20
; CHECK-NEXT: vsetivli zero, 28, e32, m8, tu, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v16, 24
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vslideup.vi v8, v16, 28
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
  %ab = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %cd = shufflevector <4 x i32> %c, <4 x i32> %d, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %abcd = shufflevector <8 x i32> %ab, <8 x i32> %cd, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %ef = shufflevector <4 x i32> %e, <4 x i32> %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %gh = shufflevector <4 x i32> %g, <4 x i32> %h, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %efgh = shufflevector <8 x i32> %ef, <8 x i32> %gh, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %abcdefgh = shufflevector <16 x i32> %abcd, <16 x i32> %efgh, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
  ret <32 x i32> %abcdefgh
}
