Commit 4db451a

[LLVM][SVE] Honour calling convention when using SVE for fixed length vectors. (#70847)
NOTE: I'm not sure how many of the corner cases are part of the documented ABI, but that shouldn't matter because my goal is for `-msve-vector-bits` to have no effect on the way arguments and returns are processed.
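To make the intent concrete, here is a small standalone C++ sketch (illustrative only; the helper name is invented and nothing below is LLVM API) of the ABI property the patch preserves for the simple, fully legal vector cases: a fixed-length vector argument wider than 128 bits is passed in 128-bit NEON-sized pieces, and that split does not change with -msve-vector-bits.

// Illustrative sketch only (not LLVM code): models the ABI property the
// patch preserves for the power-of-two vector cases exercised by the tests.
#include <cstdio>

// A fixed-length vector argument of TotalBits bits is passed in 128-bit
// NEON-sized pieces; the count is independent of -msve-vector-bits.
static unsigned neonPiecesForVectorArg(unsigned TotalBits) {
  const unsigned NeonBits = 128; // 128-bit q-register sized pieces.
  return (TotalBits + NeonBits - 1) / NeonBits;
}

int main() {
  std::printf("<32 x i8>  (256 bits) -> %u pieces\n", neonPiecesForVectorArg(256));
  std::printf("<8 x i32>  (256 bits) -> %u pieces\n", neonPiecesForVectorArg(256));
  std::printf("<16 x i32> (512 bits) -> %u pieces\n", neonPiecesForVectorArg(512));
  return 0;
}

The tests added below check this property directly: with -aarch64-sve-vector-bits-min=256 and =512 the arguments still arrive in the same NEON q registers as in the NEON-only build.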
1 parent 00a10ef commit 4db451a

File tree

4 files changed: +566 -0 lines changed

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 96 additions & 0 deletions
@@ -26719,3 +26719,99 @@ bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const {
unsigned AArch64TargetLowering::getMinimumJumpTableEntries() const {
  return Subtarget->getMinimumJumpTableEntries();
}

MVT AArch64TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  bool NonUnitFixedLengthVector =
      VT.isFixedLengthVector() && !VT.getVectorElementCount().isScalar();
  if (!NonUnitFixedLengthVector || !Subtarget->useSVEForFixedLengthVectors())
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  EVT VT1;
  MVT RegisterVT;
  unsigned NumIntermediates;
  getVectorTypeBreakdownForCallingConv(Context, CC, VT, VT1, NumIntermediates,
                                       RegisterVT);
  return RegisterVT;
}

unsigned AArch64TargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  bool NonUnitFixedLengthVector =
      VT.isFixedLengthVector() && !VT.getVectorElementCount().isScalar();
  if (!NonUnitFixedLengthVector || !Subtarget->useSVEForFixedLengthVectors())
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  EVT VT1;
  MVT VT2;
  unsigned NumIntermediates;
  return getVectorTypeBreakdownForCallingConv(Context, CC, VT, VT1,
                                              NumIntermediates, VT2);
}

unsigned AArch64TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  int NumRegs = TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
  if (!RegisterVT.isFixedLengthVector() ||
      RegisterVT.getFixedSizeInBits() <= 128)
    return NumRegs;

  assert(Subtarget->useSVEForFixedLengthVectors() && "Unexpected mode!");
  assert(IntermediateVT == RegisterVT && "Unexpected VT mismatch!");
  assert(RegisterVT.getFixedSizeInBits() % 128 == 0 && "Unexpected size!");

  // A size mismatch here implies either type promotion or widening and would
  // have resulted in scalarisation if larger vectors had not be available.
  if (RegisterVT.getSizeInBits() * NumRegs != VT.getSizeInBits()) {
    EVT EltTy = VT.getVectorElementType();
    EVT NewVT = EVT::getVectorVT(Context, EltTy, ElementCount::getFixed(1));
    if (!isTypeLegal(NewVT))
      NewVT = EltTy;

    IntermediateVT = NewVT;
    NumIntermediates = VT.getVectorNumElements();
    RegisterVT = getRegisterType(Context, NewVT);
    return NumIntermediates;
  }

  // SVE VLS support does not introduce a new ABI so we should use NEON sized
  // types for vector arguments and returns.

  unsigned NumSubRegs = RegisterVT.getFixedSizeInBits() / 128;
  NumIntermediates *= NumSubRegs;
  NumRegs *= NumSubRegs;

  switch (RegisterVT.getVectorElementType().SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for vector");
  case MVT::i8:
    IntermediateVT = RegisterVT = MVT::v16i8;
    break;
  case MVT::i16:
    IntermediateVT = RegisterVT = MVT::v8i16;
    break;
  case MVT::i32:
    IntermediateVT = RegisterVT = MVT::v4i32;
    break;
  case MVT::i64:
    IntermediateVT = RegisterVT = MVT::v2i64;
    break;
  case MVT::f16:
    IntermediateVT = RegisterVT = MVT::v8f16;
    break;
  case MVT::f32:
    IntermediateVT = RegisterVT = MVT::v4f32;
    break;
  case MVT::f64:
    IntermediateVT = RegisterVT = MVT::v2f64;
    break;
  case MVT::bf16:
    IntermediateVT = RegisterVT = MVT::v8bf16;
    break;
  }

  return NumRegs;
}
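For readers skimming the diff, the key step in getVectorTypeBreakdownForCallingConv is the re-splitting of the wide SVE-sized register type back into 128-bit pieces. A minimal standalone sketch of that arithmetic follows (illustrative only; the function name is invented and this is not the LLVM API):

// Standalone sketch of the re-splitting arithmetic (illustrative only).
#include <cassert>
#include <cstdio>

// Given the register width chosen under SVE VLS (a multiple of 128 bits) and
// the counts from the generic breakdown, report NEON-sized registers instead.
static void resplitToNeonSizedRegs(unsigned RegisterBits,
                                   unsigned &NumIntermediates,
                                   unsigned &NumRegs) {
  assert(RegisterBits % 128 == 0 && "expected a multiple of the NEON width");
  unsigned NumSubRegs = RegisterBits / 128;
  NumIntermediates *= NumSubRegs;
  NumRegs *= NumSubRegs;
}

int main() {
  // Example: with -aarch64-sve-vector-bits-min=256, a <8 x i32> argument is
  // initially assigned one 256-bit register; after re-splitting it is
  // reported as two 128-bit registers, matching the NEON-only ABI.
  unsigned NumIntermediates = 1, NumRegs = 1;
  resplitToNeonSizedRegs(256, NumIntermediates, NumRegs);
  std::printf("NumIntermediates = %u, NumRegs = %u\n", NumIntermediates, NumRegs);
  return 0;
}

The switch at the end of the function then rewrites IntermediateVT and RegisterVT to the matching 128-bit NEON type (v16i8, v8i16, v4i32, and so on).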

llvm/lib/Target/AArch64/AArch64ISelLowering.h

Lines changed: 12 additions & 0 deletions
@@ -954,6 +954,18 @@ class AArch64TargetLowering : public TargetLowering {
  // used for 64bit and 128bit vectors as well.
  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;

  // Follow NEON ABI rules even when using SVE for fixed length vectors.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;
  unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context,
                                                CallingConv::ID CC, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
Lines changed: 245 additions & 0 deletions
@@ -0,0 +1,245 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s | FileCheck %s
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s
; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

declare void @foo_v32i8(<32 x i8>)
define void @test_v32i8(<32 x i8> %unused, <32 x i8> %a) #0 {
; CHECK-LABEL: test_v32i8:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v32i8
  tail call void @foo_v32i8(<32 x i8> %a)
  ret void
}

declare void @foo_v16i16(<16 x i16>)
define void @test_v16i16(<16 x i16> %unused, <16 x i16> %a) #0 {
; CHECK-LABEL: test_v16i16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v16i16
  tail call void @foo_v16i16(<16 x i16> %a)
  ret void
}

declare void @foo_v8i32(<8 x i32>)
define void @test_v8i32(<8 x i32> %unused, <8 x i32> %a) #0 {
; CHECK-LABEL: test_v8i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v8i32
  tail call void @foo_v8i32(<8 x i32> %a)
  ret void
}

declare void @foo_v4i64(<4 x i64>)
define void @test_v4i64(<4 x i64> %unused, <4 x i64> %a) #0 {
; CHECK-LABEL: test_v4i64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v4i64
  tail call void @foo_v4i64(<4 x i64> %a)
  ret void
}

declare void @foo_v16f16(<16 x half>)
define void @test_v16f16(<16 x half> %unused, <16 x half> %a) #0 {
; CHECK-LABEL: test_v16f16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v16f16
  tail call void @foo_v16f16(<16 x half> %a)
  ret void
}

declare void @foo_v8f32(<8 x float>)
define void @test_v8f32(<8 x float> %unused, <8 x float> %a) #0 {
; CHECK-LABEL: test_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v8f32
  tail call void @foo_v8f32(<8 x float> %a)
  ret void
}

declare void @foo_v4f64(<4 x double>)
define void @test_v4f64(<4 x double> %unused, <4 x double> %a) #0 {
; CHECK-LABEL: test_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v4f64
  tail call void @foo_v4f64(<4 x double> %a)
  ret void
}

declare void @foo_v16bf16(<16 x bfloat>)
define void @test_v16bf16(<16 x bfloat> %unused, <16 x bfloat> %a) #0 {
; CHECK-LABEL: test_v16bf16:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v16bf16
  tail call void @foo_v16bf16(<16 x bfloat> %a)
  ret void
}

declare void @foo_v3i64(<3 x i64>)
define void @test_v3i64(<3 x i64> %unused, <3 x i64> %a) #0 {
; CHECK-LABEL: test_v3i64:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d2, d5
; CHECK-NEXT: fmov d1, d4
; CHECK-NEXT: fmov d0, d3
; CHECK-NEXT: b foo_v3i64
  tail call void @foo_v3i64(<3 x i64> %a)
  ret void
}

declare void @foo_v5i64(<5 x i64>)
define void @test_v5i64(<5 x i64> %unused, <5 x i64> %a) #0 {
; CHECK-LABEL: test_v5i64:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d1, d6
; CHECK-NEXT: fmov d0, d5
; CHECK-NEXT: fmov d2, d7
; CHECK-NEXT: ldp d3, d4, [sp]
; CHECK-NEXT: b foo_v5i64
  tail call void @foo_v5i64(<5 x i64> %a)
  ret void
}

declare void @foo_v1i16(<1 x i16>)
define void @test_v1i16(<1 x i16> %unused, <1 x i16> %a) #0 {
; CHECK-LABEL: test_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d0, d1
; CHECK-NEXT: b foo_v1i16
  tail call void @foo_v1i16(<1 x i16> %a)
  ret void
}

declare void @foo_v9i16(<9 x i16>)
define void @test_v9i16(<9 x i16> %unused, <9 x i16> %a) #0 {
; CHECK-LABEL: test_v9i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w0, [sp, #8]
; CHECK-NEXT: ldr w1, [sp, #16]
; CHECK-NEXT: ldr w2, [sp, #24]
; CHECK-NEXT: ldr w3, [sp, #32]
; CHECK-NEXT: ldr w4, [sp, #40]
; CHECK-NEXT: ldr w5, [sp, #48]
; CHECK-NEXT: ldr w6, [sp, #56]
; CHECK-NEXT: ldr w7, [sp, #64]
; CHECK-NEXT: ldr w8, [sp, #72]
; CHECK-NEXT: str w8, [sp]
; CHECK-NEXT: b foo_v9i16
  tail call void @foo_v9i16(<9 x i16> %a)
  ret void
}

declare void @foo_v16i1(<16 x i1>)
define void @test_v16i1(<16 x i1> %unused, <16 x i1> %a) #0 {
; CHECK-LABEL: test_v16i1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: b foo_v16i1
  tail call void @foo_v16i1(<16 x i1> %a)
  ret void
}

; UTC_ARGS: --disable
; The output from this test is large and generally not useful, what matters is
; no vector registers are used.
declare void @foo_v32i1(<32 x i1>)
define void @test_v32i1(<32 x i1> %unused, <32 x i1> %a) #0 {
; CHECK-LABEL: test_v32i1:
; CHECK: // %bb.0:
; CHECK-NOT: [q,v,z][0-9]+
; CHECK: b foo_v32i1
  tail call void @foo_v32i1(<32 x i1> %a)
  ret void
}
; UTC_ARGS: --enable

declare void @foo_v1i128(<1 x i128>)
define void @test_v1i128(<1 x i128> %unused, <1 x i128> %a) #0 {
; CHECK-LABEL: test_v1i128:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x1, x3
; CHECK-NEXT: mov x0, x2
; CHECK-NEXT: b foo_v1i128
  tail call void @foo_v1i128(<1 x i128> %a)
  ret void
}

declare void @foo_v2i128(<2 x i128>)
define void @test_v2i128(<2 x i128> %unused, <2 x i128> %a) #0 {
; CHECK-LABEL: test_v2i128:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x3, x7
; CHECK-NEXT: mov x2, x6
; CHECK-NEXT: mov x0, x4
; CHECK-NEXT: mov x1, x5
; CHECK-NEXT: b foo_v2i128
  tail call void @foo_v2i128(<2 x i128> %a)
  ret void
}

declare void @foo_v1i256(<1 x i256>)
define void @test_v1i256(<1 x i256> %unused, <1 x i256> %a) #0 {
; CHECK-LABEL: test_v1i256:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x3, x7
; CHECK-NEXT: mov x2, x6
; CHECK-NEXT: mov x0, x4
; CHECK-NEXT: mov x1, x5
; CHECK-NEXT: b foo_v1i256
  tail call void @foo_v1i256(<1 x i256> %a)
  ret void
}

declare void @foo_v2i256(<2 x i256>)
define void @test_v2i256(<2 x i256> %unused, <2 x i256> %a) #0 {
; CHECK-LABEL: test_v2i256:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp x0, x1, [sp]
; CHECK-NEXT: ldp x2, x3, [sp, #16]
; CHECK-NEXT: ldp x4, x5, [sp, #32]
; CHECK-NEXT: ldp x6, x7, [sp, #48]
; CHECK-NEXT: b foo_v2i256
  tail call void @foo_v2i256(<2 x i256> %a)
  ret void
}

declare void @foo_v1f128(<1 x fp128>)
define void @test_v1f128(<1 x fp128> %unused, <1 x fp128> %a) #0 {
; CHECK-LABEL: test_v1f128:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: b foo_v1f128
  tail call void @foo_v1f128(<1 x fp128> %a)
  ret void
}

declare void @foo_v2f128(<2 x fp128>)
define void @test_v2f128(<2 x fp128> %unused, <2 x fp128> %a) #0 {
; CHECK-LABEL: test_v2f128:
; CHECK: // %bb.0:
; CHECK-NEXT: mov v1.16b, v3.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: b foo_v2f128
  tail call void @foo_v2f128(<2 x fp128> %a)
  ret void
}

attributes #0 = { "target-features"="+sve,+bf16" nounwind }
