Skip to content

Commit 0295c2a

Browse files
authored
[PowerPC][AIX] Support ByVals with greater alignment than pointer size (#93341)
Implementation is NOT compatible with IBM XL C 16.1 and earlier, but is compatible with GCC. It handles all ByVals with alignment greater than the pointer width the same way IBM XL C handles ByVals that have vector members. For overaligned objects that do not contain vectors, IBM XL C does not align them properly if they are passed in the GPR argument registers. This patch was originally written by Sean Fertile @mandlebug. Previously on Phabricator: https://reviews.llvm.org/D105659
1 parent 6150e84 commit 0295c2a

File tree

4 files changed

+120
-19
lines changed

4 files changed

+120
-19
lines changed

llvm/lib/Target/PowerPC/PPCISelLowering.cpp

Lines changed: 30 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6855,7 +6855,9 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
68556855
const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
68566856
State.getMachineFunction().getSubtarget());
68576857
const bool IsPPC64 = Subtarget.isPPC64();
6858-
const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6858+
const unsigned PtrSize = IsPPC64 ? 8 : 4;
6859+
const Align PtrAlign(PtrSize);
6860+
const Align StackAlign(16);
68596861
const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
68606862

68616863
if (ValVT == MVT::f128)
@@ -6876,12 +6878,16 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
68766878
PPC::V6, PPC::V7, PPC::V8, PPC::V9,
68776879
PPC::V10, PPC::V11, PPC::V12, PPC::V13};
68786880

6881+
const ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6882+
68796883
if (ArgFlags.isByVal()) {
6880-
if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6884+
const Align ByValAlign(ArgFlags.getNonZeroByValAlign());
6885+
if (ByValAlign > StackAlign)
68816886
report_fatal_error("Pass-by-value arguments with alignment greater than "
6882-
"register width are not supported.");
6887+
"16 are not supported.");
68836888

68846889
const unsigned ByValSize = ArgFlags.getByValSize();
6890+
const Align ObjAlign = ByValAlign > PtrAlign ? ByValAlign : PtrAlign;
68856891

68866892
// An empty aggregate parameter takes up no storage and no registers,
68876893
// but needs a MemLoc for a stack slot for the formal arguments side.
@@ -6891,11 +6897,23 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
68916897
return false;
68926898
}
68936899

6894-
const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6895-
unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6896-
for (const unsigned E = Offset + StackSize; Offset < E;
6897-
Offset += PtrAlign.value()) {
6898-
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6900+
// Shadow allocate any registers that are not properly aligned.
6901+
unsigned NextReg = State.getFirstUnallocated(GPRs);
6902+
while (NextReg != GPRs.size() &&
6903+
!isGPRShadowAligned(GPRs[NextReg], ObjAlign)) {
6904+
// Shadow allocate next registers since its aligment is not strict enough.
6905+
unsigned Reg = State.AllocateReg(GPRs);
6906+
// Allocate the stack space shadowed by said register.
6907+
State.AllocateStack(PtrSize, PtrAlign);
6908+
assert(Reg && "Alocating register unexpectedly failed.");
6909+
(void)Reg;
6910+
NextReg = State.getFirstUnallocated(GPRs);
6911+
}
6912+
6913+
const unsigned StackSize = alignTo(ByValSize, ObjAlign);
6914+
unsigned Offset = State.AllocateStack(StackSize, ObjAlign);
6915+
for (const unsigned E = Offset + StackSize; Offset < E; Offset += PtrSize) {
6916+
if (unsigned Reg = State.AllocateReg(GPRs))
68996917
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
69006918
else {
69016919
State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
@@ -6917,12 +6935,12 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
69176935
[[fallthrough]];
69186936
case MVT::i1:
69196937
case MVT::i32: {
6920-
const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6938+
const unsigned Offset = State.AllocateStack(PtrSize, PtrAlign);
69216939
// AIX integer arguments are always passed in register width.
69226940
if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
69236941
LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
69246942
: CCValAssign::LocInfo::ZExt;
6925-
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6943+
if (unsigned Reg = State.AllocateReg(GPRs))
69266944
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
69276945
else
69286946
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
@@ -6942,8 +6960,8 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
69426960
State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
69436961

69446962
// Reserve and initialize GPRs or initialize the PSA as required.
6945-
for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6946-
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6963+
for (unsigned I = 0; I < StoreSize; I += PtrSize) {
6964+
if (unsigned Reg = State.AllocateReg(GPRs)) {
69476965
assert(FReg && "An FPR should be available when a GPR is reserved.");
69486966
if (State.isVarArg()) {
69496967
// Successfully reserved GPRs are only initialized for vararg calls.
@@ -6995,9 +7013,6 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
69957013
return false;
69967014
}
69977015

6998-
const unsigned PtrSize = IsPPC64 ? 8 : 4;
6999-
ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
7000-
70017016
unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
70027017
// Burn any underaligned registers and their shadowed stack space until
70037018
// we reach the required alignment.
@@ -7347,9 +7362,6 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
73477362
const MCPhysReg ArgReg = VA.getLocReg();
73487363
const PPCFrameLowering *FL = Subtarget.getFrameLowering();
73497364

7350-
if (Flags.getNonZeroByValAlign() > PtrByteSize)
7351-
report_fatal_error("Over aligned byvals not supported yet.");
7352-
73537365
const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
73547366
const int FI = MF.getFrameInfo().CreateFixedObject(
73557367
StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,

llvm/test/CodeGen/PowerPC/aix-cc-byval-limitation3.ll

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,4 +13,4 @@ entry:
1313

1414
declare void @foo(i32, i32, i32, i32, i32, i32, i32, i32, ptr byval(%struct.S) align 32)
1515

16-
; CHECK: LLVM ERROR: Pass-by-value arguments with alignment greater than register width are not supported.
16+
; CHECK: LLVM ERROR: Pass-by-value arguments with alignment greater than 16 are not supported.
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
2+
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
3+
; RUN: -mattr=-altivec -verify-machineinstrs < %s | \
4+
; RUN: FileCheck --check-prefix=32BIT %s
5+
6+
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
7+
; RUN: -mattr=-altivec -verify-machineinstrs < %s | \
8+
; RUN: FileCheck --check-prefix=64BIT %s
9+
10+
%struct.vec_struct = type { <4 x i32> }
11+
12+
; Function Attrs: norecurse nounwind readonly
13+
define i32 @vec_struct_test(i32 %i, ptr nocapture readonly byval(%struct.vec_struct) align 16 %vs) {
14+
; 32BIT-LABEL: name: vec_struct_test
15+
; 32BIT: bb.0.entry:
16+
; 32BIT-NEXT: liveins: $r3, $r5, $r6, $r7, $r8
17+
; 32BIT-NEXT: {{ $}}
18+
; 32BIT-NEXT: STW killed renamable $r7, 8, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 8, align 8)
19+
; 32BIT-NEXT: STW killed renamable $r6, 4, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 4)
20+
; 32BIT-NEXT: STW renamable $r5, 0, %fixed-stack.0 :: (store (s32) into %fixed-stack.0, align 16)
21+
; 32BIT-NEXT: STW killed renamable $r8, 12, %fixed-stack.0 :: (store (s32) into %fixed-stack.0 + 12)
22+
; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r5, killed renamable $r3
23+
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
24+
;
25+
; 64BIT-LABEL: name: vec_struct_test
26+
; 64BIT: bb.0.entry:
27+
; 64BIT-NEXT: liveins: $x3, $x5, $x6
28+
; 64BIT-NEXT: {{ $}}
29+
; 64BIT-NEXT: STD renamable $x5, 0, %fixed-stack.0 :: (store (s64) into %fixed-stack.0, align 16)
30+
; 64BIT-NEXT: STD killed renamable $x6, 8, %fixed-stack.0 :: (store (s64) into %fixed-stack.0 + 8)
31+
; 64BIT-NEXT: renamable $x4 = RLDICL killed renamable $x5, 32, 32
32+
; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r4, renamable $r3, implicit killed $x3, implicit killed $x4, implicit-def $x3
33+
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
34+
entry:
35+
%vsi = getelementptr inbounds i8, ptr %vs, i32 0
36+
%0 = load <4 x i32>, ptr %vsi, align 16
37+
%vecext = extractelement <4 x i32> %0, i32 0
38+
%add = add nsw i32 %vecext, %i
39+
ret i32 %add
40+
}
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2+
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
3+
; RUN: -mattr=-altivec -verify-machineinstrs < %s | \
4+
; RUN: FileCheck --check-prefix=32BIT %s
5+
6+
; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -stop-after=machine-cp -mcpu=pwr7 \
7+
; RUN: -mattr=-altivec -verify-machineinstrs < %s | \
8+
; RUN: FileCheck --check-prefix=64BIT %s
9+
10+
%struct.B = type { <8 x i16>, i32, i32, [8 x i8] }
11+
12+
; Function Attrs: nounwind
13+
define i32 @caller() {
14+
; 32BIT-LABEL: name: caller
15+
; 32BIT: bb.0.entry:
16+
; 32BIT-NEXT: renamable $r3 = LWZ 28, %stack.0.vs :: (load (s32) from unknown-address + 4)
17+
; 32BIT-NEXT: STW killed renamable $r3, 60, $r1 :: (store (s32) into unknown-address + 4, basealign 16)
18+
; 32BIT-NEXT: renamable $r3 = LWZ 24, %stack.0.vs :: (load (s32) from %stack.0.vs + 24, align 8, basealign 16)
19+
; 32BIT-NEXT: STW killed renamable $r3, 56, $r1 :: (store (s32), align 16)
20+
; 32BIT-NEXT: ADJCALLSTACKDOWN 64, 0, implicit-def dead $r1, implicit $r1
21+
; 32BIT-NEXT: renamable $r10 = LWZ 20, %stack.0.vs :: (load (s32) from %stack.0.vs + 20)
22+
; 32BIT-NEXT: renamable $r9 = LWZ 16, %stack.0.vs :: (load (s32) from %stack.0.vs + 16, align 16)
23+
; 32BIT-NEXT: renamable $r8 = LWZ 12, %stack.0.vs :: (load (s32) from %stack.0.vs + 12)
24+
; 32BIT-NEXT: renamable $r7 = LWZ 8, %stack.0.vs :: (load (s32) from %stack.0.vs + 8, align 8)
25+
; 32BIT-NEXT: renamable $r6 = LWZ 4, %stack.0.vs :: (load (s32) from %stack.0.vs + 4)
26+
; 32BIT-NEXT: renamable $r5 = LWZ 0, %stack.0.vs :: (load (s32) from %stack.0.vs, align 16)
27+
; 32BIT-NEXT: $r3 = LI 0
28+
; 32BIT-NEXT: BL_NOP <mcsymbol .vec_struct_test[PR]>, csr_aix32, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r5, implicit $r6, implicit $r7, implicit $r8, implicit $r9, implicit $r10, implicit $r2, implicit-def $r1, implicit-def $r3
29+
; 32BIT-NEXT: ADJCALLSTACKUP 64, 0, implicit-def dead $r1, implicit $r1
30+
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
31+
;
32+
; 64BIT-LABEL: name: caller
33+
; 64BIT: bb.0.entry:
34+
; 64BIT-NEXT: ADJCALLSTACKDOWN 112, 0, implicit-def dead $r1, implicit $r1
35+
; 64BIT-NEXT: renamable $x8 = LD 24, %stack.0.vs :: (load (s64) from %stack.0.vs + 24)
36+
; 64BIT-NEXT: renamable $x7 = LD 16, %stack.0.vs :: (load (s64) from %stack.0.vs + 16, align 16)
37+
; 64BIT-NEXT: renamable $x6 = LD 8, %stack.0.vs :: (load (s64) from %stack.0.vs + 8)
38+
; 64BIT-NEXT: renamable $x5 = LD 0, %stack.0.vs :: (load (s64) from %stack.0.vs, align 16)
39+
; 64BIT-NEXT: $x3 = LI8 0
40+
; 64BIT-NEXT: BL8_NOP <mcsymbol .vec_struct_test[PR]>, csr_ppc64, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x5, implicit $x6, implicit $x7, implicit $x8, implicit $x2, implicit-def $r1, implicit-def $x3
41+
; 64BIT-NEXT: ADJCALLSTACKUP 112, 0, implicit-def dead $r1, implicit $r1
42+
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
43+
entry:
44+
%vs = alloca %struct.B, align 16
45+
%call = tail call i32 @vec_struct_test(i32 0, ptr nonnull byval(%struct.B) align 16 %vs)
46+
ret i32 %call
47+
}
48+
49+
declare i32 @vec_struct_test(i32, ptr byval(%struct.B) align 16)

0 commit comments

Comments
 (0)