[ARM/X86] Standardize the isEligibleForTailCallOptimization prototypes #90688

Merged: 5 commits, May 3, 2024
Changes from all commits
43 changes: 22 additions & 21 deletions llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2366,6 +2366,12 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   bool PreferIndirect = false;
   bool GuardWithBTI = false;
 
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
+  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
+
   // Lower 'returns_twice' calls to a pseudo-instruction.
   if (CLI.CB && CLI.CB->getAttributes().hasFnAttr(Attribute::ReturnsTwice) &&
       !Subtarget->noBTIAtReturnTwice())
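This first hunk is the core of the ARM change: calling-convention analysis now runs once, at the top of LowerCall, so the same CCState and ArgLocs feed both the tail-call eligibility check and the rest of call lowering instead of being recomputed in each place. The sketch below is a self-contained toy model of that analyze-once pattern, assuming nothing from LLVM; ToyCCState, analyzeCallOperands, and isEligibleForTailCall are illustrative stand-ins, not the real API.

#include <iostream>
#include <vector>

// Toy stand-in for LLVM's CCState: assign each argument a register slot
// while registers last, then a stack offset.
struct ToyCCState {
  unsigned NumArgRegs;
  unsigned RegsUsed = 0;
  unsigned StackSize = 0;
  std::vector<unsigned> StackOffsets; // one entry per stack-assigned argument

  explicit ToyCCState(unsigned Regs) : NumArgRegs(Regs) {}

  void analyzeCallOperands(const std::vector<unsigned> &ArgSizes) {
    for (unsigned Size : ArgSizes) {
      if (RegsUsed < NumArgRegs) {
        ++RegsUsed; // argument travels in a register
      } else {
        StackOffsets.push_back(StackSize); // argument spills to the stack
        StackSize += Size;
      }
    }
  }
};

// The eligibility check consumes the precomputed analysis instead of
// building a second, throwaway state of its own, as the old code did.
bool isEligibleForTailCall(const ToyCCState &CCInfo) {
  return CCInfo.StackSize == 0; // simplistic rule: no stack arguments
}

int main() {
  ToyCCState CCInfo(/*Regs=*/4);               // e.g. ARM r0-r3
  CCInfo.analyzeCallOperands({4, 4, 4, 4, 8}); // five args; the fifth spills
  std::cout << "eligible: " << isEligibleForTailCall(CCInfo)
            << ", stack bytes: " << CCInfo.StackSize << '\n';
}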
@@ -2401,10 +2407,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   }
   if (isTailCall) {
     // Check if it's really possible to do a tail call.
-    isTailCall = IsEligibleForTailCallOptimization(
-        Callee, CallConv, isVarArg, isStructRet,
-        MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
-        PreferIndirect);
+    isTailCall =
+        IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs, PreferIndirect);
 
     if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt &&
         CallConv != CallingConv::Tail && CallConv != CallingConv::SwiftTail)
@@ -2419,11 +2423,6 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
     report_fatal_error("failed to perform tail call elimination on a call "
                        "site marked musttail");
-  // Analyze operands of the call, assigning locations to each operand.
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                 *DAG.getContext());
-  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getStackSize();
@@ -2985,14 +2984,19 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
 
 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
 /// for tail call optimization. Targets which want to do tail call
-/// optimization should implement this function.
+/// optimization should implement this function. Note that this function also
+/// processes musttail calls, so when this function returns false on a valid
+/// musttail call, a fatal backend error occurs.
 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
-    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
-    bool isCalleeStructRet, bool isCallerStructRet,
-    const SmallVectorImpl<ISD::OutputArg> &Outs,
-    const SmallVectorImpl<SDValue> &OutVals,
-    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
-    const bool isIndirect) const {
+    TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
+    SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const {
+  CallingConv::ID CalleeCC = CLI.CallConv;
+  SDValue Callee = CLI.Callee;
+  bool isVarArg = CLI.IsVarArg;
+  const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  const SelectionDAG &DAG = CLI.DAG;
   MachineFunction &MF = DAG.getMachineFunction();
   const Function &CallerF = MF.getFunction();
   CallingConv::ID CallerCC = CallerF.getCallingConv();
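The new prototype replaces seven loose parameters with the CallLoweringInfo the call site already holds, plus the shared CCState and ArgLocs; the function then unpacks what it needs locally. Below is a minimal sketch of why that shape helps, using a toy struct rather than LLVM's types (ToyCallLoweringInfo and its fields are assumptions for illustration): the bundle keeps the ARM and X86 prototypes identical, and makes the musttail contract from the new doc comment easy to demonstrate.

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for TargetLowering::CallLoweringInfo: one struct carries what
// previously traveled as a long, target-specific parameter list.
struct ToyCallLoweringInfo {
  std::string CalleeName;
  bool IsVarArg;
  bool IsMustTail;
  std::vector<unsigned> OutArgSizes; // outgoing argument sizes in bytes
};

// One bundled parameter instead of Callee/CalleeCC/isVarArg/Outs/... keeps
// every target's eligibility hook the same shape.
bool isEligibleForTailCall(const ToyCallLoweringInfo &CLI) {
  return !CLI.IsVarArg && CLI.OutArgSizes.size() <= 4; // simplistic rule
}

int main() {
  ToyCallLoweringInfo CLI{"callee", false, true, {4, 4, 4, 4, 8}};
  if (!isEligibleForTailCall(CLI) && CLI.IsMustTail) {
    // Mirrors report_fatal_error in ARM's LowerCall: a musttail call that
    // fails the eligibility check is a hard backend error, not a fallback.
    std::cerr << "fatal: failed to perform tail call elimination on a call "
                 "site marked musttail\n";
    return 1;
  }
  std::cout << "lowered as a tail call\n";
}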
@@ -3028,6 +3032,8 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
 
   // Also avoid sibcall optimization if either caller or callee uses struct
   // return semantics.
+  bool isCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
+  bool isCallerStructRet = MF.getFunction().hasStructRetAttr();
   if (isCalleeStructRet || isCallerStructRet)
     return false;
 
@@ -3073,11 +3079,6 @@ bool ARMTargetLowering::IsEligibleForTailCallOptimization(
   // If the callee takes no arguments then go on to check the results of the
   // call.
   if (!Outs.empty()) {
-    // Check if stack adjustment is needed. For now, do not do this if any
-    // argument is passed on the stack.
-    SmallVector<CCValAssign, 16> ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
-    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
     if (CCInfo.getStackSize()) {
       // Check if the arguments are already laid out in the right way as
       // the caller's fixed stack objects.
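The surviving branch keeps the existing policy: a call with stack-passed arguments can still be tail-called, but only when every outgoing stack argument already sits exactly where the caller's own incoming arguments live, so no stack adjustment is needed. A self-contained toy version of that layout comparison (illustrative only; the real check walks frame objects via MatchingStackOffset):

#include <iostream>
#include <map>
#include <vector>

struct StackArg {
  unsigned Offset; // byte offset within the argument area
  unsigned Size;   // size in bytes
};

// Toy layout check: every outgoing stack argument must coincide with one of
// the caller's fixed incoming stack objects, offset and size alike.
bool argsAlreadyInPlace(const std::vector<StackArg> &Outgoing,
                        const std::map<unsigned, unsigned> &CallerFixed) {
  for (const StackArg &A : Outgoing) {
    auto It = CallerFixed.find(A.Offset);
    if (It == CallerFixed.end() || It->second != A.Size)
      return false; // would need a stack adjustment; not a safe sibcall
  }
  return true;
}

int main() {
  // Caller receives two 4-byte stack arguments at offsets 0 and 4.
  std::map<unsigned, unsigned> CallerFixed{{0, 4}, {4, 4}};
  std::vector<StackArg> Forwarded{{0, 4}, {4, 4}}; // passed straight through
  std::vector<StackArg> Shuffled{{4, 4}, {8, 4}};  // moved; needs adjustment
  std::cout << argsAlreadyInPlace(Forwarded, CallerFixed) << '\n'; // 1
  std::cout << argsAlreadyInPlace(Shuffled, CallerFixed) << '\n';  // 0
}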
8 changes: 2 additions & 6 deletions llvm/lib/Target/ARM/ARMISelLowering.h
@@ -943,12 +943,8 @@ class VectorType;
     /// for tail call optimization. Targets which want to do tail call
     /// optimization should implement this function.
     bool IsEligibleForTailCallOptimization(
-        SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
-        bool isCalleeStructRet, bool isCallerStructRet,
-        const SmallVectorImpl<ISD::OutputArg> &Outs,
-        const SmallVectorImpl<SDValue> &OutVals,
-        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
-        const bool isIndirect) const;
+        TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
+        SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const;
 
     bool CanLowerReturn(CallingConv::ID CallConv,
                         MachineFunction &MF, bool isVarArg,
6 changes: 2 additions & 4 deletions llvm/lib/Target/X86/X86ISelLowering.h
@@ -1632,10 +1632,8 @@ namespace llvm {
     /// Check whether the call is eligible for tail call optimization. Targets
     /// that want to do tail call optimization should implement this function.
     bool IsEligibleForTailCallOptimization(
-        SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleeStackStructRet,
-        bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
-        const SmallVectorImpl<SDValue> &OutVals,
-        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
+        TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
+        SmallVectorImpl<CCValAssign> &ArgLocs, bool IsCalleePopSRet) const;
     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                     SDValue Chain, bool IsTailCall,
                                     bool Is64Bit, int FPDiff,
79 changes: 36 additions & 43 deletions llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2021,6 +2021,22 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   if (CallConv == CallingConv::X86_INTR)
     report_fatal_error("X86 interrupts may not be called directly");
 
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
+
+  // Allocate shadow area for Win64.
+  if (IsWin64)
+    CCInfo.AllocateStack(32, Align(8));
+
+  CCInfo.AnalyzeArguments(Outs, CC_X86);
+
+  // In vectorcall calling convention a second pass is required for the HVA
+  // types.
+  if (CallingConv::X86_VectorCall == CallConv) {
+    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
+  }
+
   bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
   if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
     // If we are using a GOT, disable tail calls to external symbols with
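The hoisted x86 block carries two convention details forward unchanged: on Win64 the caller reserves a 32-byte shadow area before any stack argument is placed, even for calls whose arguments all travel in registers, and vectorcall needs a second analysis pass for homogeneous vector aggregates (HVAs). A minimal model of the shadow-area arithmetic, with illustrative names:

#include <iostream>

// Toy model of the Win64 rule: 32 bytes (four 8-byte slots) are reserved up
// front, and each argument beyond the register budget adds one 8-byte slot.
unsigned callFrameStackBytes(unsigned NumArgs, unsigned NumArgRegs,
                             bool IsWin64) {
  unsigned StackBytes = IsWin64 ? 32 : 0; // shadow area comes first
  for (unsigned I = 0; I < NumArgs; ++I)
    if (I >= NumArgRegs)
      StackBytes += 8; // spilled argument occupies one aligned slot
  return StackBytes;
}

int main() {
  std::cout << callFrameStackBytes(3, 4, true) << '\n';  // 32: shadow only
  std::cout << callFrameStackBytes(6, 4, true) << '\n';  // 48: 32 + 2 slots
  std::cout << callFrameStackBytes(6, 6, false) << '\n'; // 0: all in regs
}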
@@ … @@
 
   if (isTailCall && !IsMustTail) {
     // Check if it's really possible to do a tail call.
-    isTailCall = IsEligibleForTailCallOptimization(
-        Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
-        Ins, DAG);
+    isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
+                                                   IsCalleePopSRet);
 
     // Sibcalls are automatically detected tailcalls which do not require
     // ABI changes.
@@ … @@
   assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
          "Var args not supported with calling convention fastcc, ghc or hipe");
 
-  // Analyze operands of the call, assigning locations to each operand.
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
-
-  // Allocate shadow area for Win64.
-  if (IsWin64)
-    CCInfo.AllocateStack(32, Align(8));
-
-  CCInfo.AnalyzeArguments(Outs, CC_X86);
-
-  // In vectorcall calling convention a second pass is required for the HVA
-  // types.
-  if (CallingConv::X86_VectorCall == CallConv) {
-    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
-  }
-
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
   if (IsSibcall)
@@ -2723,11 +2722,20 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
 
 /// Check whether the call is eligible for tail call optimization. Targets
 /// that want to do tail call optimization should implement this function.
+/// Note that the x86 backend does not check musttail calls for eligibility! The
+/// rest of x86 tail call lowering must be prepared to forward arguments of any
+/// type.
 bool X86TargetLowering::IsEligibleForTailCallOptimization(
-    SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
-    bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
-    const SmallVectorImpl<SDValue> &OutVals,
-    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
+    TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
+    SmallVectorImpl<CCValAssign> &ArgLocs, bool IsCalleePopSRet) const {
+  SelectionDAG &DAG = CLI.DAG;
+  const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  SDValue Callee = CLI.Callee;
+  CallingConv::ID CalleeCC = CLI.CallConv;
+  bool isVarArg = CLI.IsVarArg;
+
   if (!mayTailCallThisCC(CalleeCC))
     return false;
 
@@ … @@
   // If the function return type is x86_fp80 and the callee return type is not,
   // then the FP_EXTEND of the call result is not a nop. It's not safe to
   // perform a tailcall optimization here.
-  if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+  if (CallerF.getReturnType()->isX86_FP80Ty() && !CLI.RetTy->isX86_FP80Ty())
     return false;
 
   CallingConv::ID CallerCC = CallerF.getCallingConv();
@@ -2791,9 +2799,6 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
   if (IsCalleeWin64 || IsCallerWin64)
     return false;
 
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
-  CCInfo.AnalyzeCallOperands(Outs, CC_X86);
   for (const auto &VA : ArgLocs)
     if (!VA.isRegLoc())
       return false;
@@ … @@
   }
   if (Unused) {
     SmallVector<CCValAssign, 16> RVLocs;
-    CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
-    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
+    CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
+    RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
     for (const auto &VA : RVLocs) {
       if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
         return false;
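The rename to RVCCInfo is forced by the new signature rather than cosmetic: CCInfo is now a parameter of IsEligibleForTailCallOptimization, so a local CCState with the old name would shadow the precomputed argument analysis for the rest of the block. A tiny standalone illustration of the hazard:

#include <iostream>

// Once CCInfo is a parameter, reusing the name for a local silently hides
// the parameter from that point on; -Wshadow flags exactly this pattern.
void check(int CCInfo) {
  {
    int CCInfo = 0; // shadows the parameter inside this scope
    std::cout << "inner scope sees " << CCInfo << '\n'; // prints 0
  }
  std::cout << "parameter is still " << CCInfo << '\n'; // prints 42
}

int main() { check(42); }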
@@ … @@
       return false;
   }
 
-  unsigned StackArgsSize = 0;
+  unsigned StackArgsSize = CCInfo.getStackSize();
 
   // If the callee takes no arguments then go on to check the results of the
   // call.
   if (!Outs.empty()) {
-    // Check if stack adjustment is needed. For now, do not do this if any
-    // argument is passed on the stack.
-    SmallVector<CCValAssign, 16> ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
-
-    // Allocate shadow area for Win64
-    if (IsCalleeWin64)
-      CCInfo.AllocateStack(32, Align(8));
-
-    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
-    StackArgsSize = CCInfo.getStackSize();
-
-    if (CCInfo.getStackSize()) {
+    if (StackArgsSize > 0) {
       // Check if the arguments are already laid out in the right way as
       // the caller's fixed stack objects.
       MachineFrameInfo &MFI = MF.getFrameInfo();