@@ -6855,7 +6855,9 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6855
6855
const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6856
6856
State.getMachineFunction().getSubtarget());
6857
6857
const bool IsPPC64 = Subtarget.isPPC64();
6858
- const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6858
+ const unsigned PtrSize = IsPPC64 ? 8 : 4;
6859
+ const Align PtrAlign(PtrSize);
6860
+ const Align StackAlign(16);
6859
6861
const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6860
6862
6861
6863
if (ValVT == MVT::f128)
@@ -6876,12 +6878,16 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6876
6878
PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6877
6879
PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6878
6880
6881
+ const ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6882
+
6879
6883
if (ArgFlags.isByVal()) {
6880
- if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6884
+ const Align ByValAlign(ArgFlags.getNonZeroByValAlign());
6885
+ if (ByValAlign > StackAlign)
6881
6886
report_fatal_error("Pass-by-value arguments with alignment greater than "
6882
- "register width are not supported.");
6887
+ "16 are not supported.");
6883
6888
6884
6889
const unsigned ByValSize = ArgFlags.getByValSize();
6890
+ const Align ObjAlign = ByValAlign > PtrAlign ? ByValAlign : PtrAlign;
6885
6891
6886
6892
// An empty aggregate parameter takes up no storage and no registers,
6887
6893
// but needs a MemLoc for a stack slot for the formal arguments side.
@@ -6891,11 +6897,23 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6891
6897
return false;
6892
6898
}
6893
6899
6894
- const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6895
- unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6896
- for (const unsigned E = Offset + StackSize; Offset < E;
6897
- Offset += PtrAlign.value()) {
6898
- if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6900
+ // Shadow allocate any registers that are not properly aligned.
6901
+ unsigned NextReg = State.getFirstUnallocated(GPRs);
6902
+ while (NextReg != GPRs.size() &&
6903
+ !isGPRShadowAligned(GPRs[NextReg], ObjAlign)) {
6904
+ // Shadow allocate the next register since its alignment is not strict enough.
6905
+ unsigned Reg = State.AllocateReg(GPRs);
6906
+ // Allocate the stack space shadowed by said register.
6907
+ State.AllocateStack(PtrSize, PtrAlign);
6908
+ assert(Reg && "Alocating register unexpectedly failed.");
6909
+ (void)Reg;
6910
+ NextReg = State.getFirstUnallocated(GPRs);
6911
+ }
6912
+
6913
+ const unsigned StackSize = alignTo(ByValSize, ObjAlign);
6914
+ unsigned Offset = State.AllocateStack(StackSize, ObjAlign);
6915
+ for (const unsigned E = Offset + StackSize; Offset < E; Offset += PtrSize) {
6916
+ if (unsigned Reg = State.AllocateReg(GPRs))
6899
6917
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6900
6918
else {
6901
6919
State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
@@ -6917,12 +6935,12 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6917
6935
[[fallthrough]];
6918
6936
case MVT::i1:
6919
6937
case MVT::i32: {
6920
- const unsigned Offset = State.AllocateStack(PtrAlign.value() , PtrAlign);
6938
+ const unsigned Offset = State.AllocateStack(PtrSize , PtrAlign);
6921
6939
// AIX integer arguments are always passed in register width.
6922
6940
if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6923
6941
LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6924
6942
: CCValAssign::LocInfo::ZExt;
6925
- if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32 ))
6943
+ if (unsigned Reg = State.AllocateReg(GPRs ))
6926
6944
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6927
6945
else
6928
6946
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
@@ -6942,8 +6960,8 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6942
6960
State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6943
6961
6944
6962
// Reserve and initialize GPRs or initialize the PSA as required.
6945
- for (unsigned I = 0; I < StoreSize; I += PtrAlign.value() ) {
6946
- if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32 )) {
6963
+ for (unsigned I = 0; I < StoreSize; I += PtrSize ) {
6964
+ if (unsigned Reg = State.AllocateReg(GPRs )) {
6947
6965
assert(FReg && "An FPR should be available when a GPR is reserved.");
6948
6966
if (State.isVarArg()) {
6949
6967
// Successfully reserved GPRs are only initialized for vararg calls.
@@ -6995,9 +7013,6 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6995
7013
return false;
6996
7014
}
6997
7015
6998
- const unsigned PtrSize = IsPPC64 ? 8 : 4;
6999
- ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
7000
-
7001
7016
unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
7002
7017
// Burn any underaligned registers and their shadowed stack space until
7003
7018
// we reach the required alignment.
@@ -7347,9 +7362,6 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7347
7362
const MCPhysReg ArgReg = VA.getLocReg();
7348
7363
const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7349
7364
7350
- if (Flags.getNonZeroByValAlign() > PtrByteSize)
7351
- report_fatal_error("Over aligned byvals not supported yet.");
7352
-
7353
7365
const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7354
7366
const int FI = MF.getFrameInfo().CreateFixedObject(
7355
7367
StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
0 commit comments