@@ -5708,18 +5708,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     llvm::FunctionType *FTy = F->getFunctionType();
 
     for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
-      Value *ArgValue;
-      // If this is a normal argument, just emit it as a scalar.
-      if ((ICEArguments & (1 << i)) == 0) {
-        ArgValue = EmitScalarExpr(E->getArg(i));
-      } else {
-        // If this is required to be a constant, constant fold it so that we
-        // know that the generated intrinsic gets a ConstantInt.
-        ArgValue = llvm::ConstantInt::get(
-            getLLVMContext(),
-            *E->getArg(i)->getIntegerConstantExpr(getContext()));
-      }
-
+      Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
       // If the intrinsic arg type is different from the builtin arg type
       // we need to do a bit cast.
       llvm::Type *PTy = FTy->getParamType(i);
@@ -8599,15 +8588,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
       }
     }
 
-    if ((ICEArguments & (1 << i)) == 0) {
-      Ops.push_back(EmitScalarExpr(E->getArg(i)));
-    } else {
-      // If this is required to be a constant, constant fold it so that we know
-      // that the generated intrinsic gets a ConstantInt.
-      Ops.push_back(llvm::ConstantInt::get(
-          getLLVMContext(),
-          *E->getArg(i)->getIntegerConstantExpr(getContext())));
-    }
+    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
   }
 
   switch (BuiltinID) {
@@ -11094,15 +11075,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
        continue;
      }
    }
-    if ((ICEArguments & (1 << i)) == 0) {
-      Ops.push_back(EmitScalarExpr(E->getArg(i)));
-    } else {
-      // If this is required to be a constant, constant fold it so that we know
-      // that the generated intrinsic gets a ConstantInt.
-      Ops.push_back(llvm::ConstantInt::get(
-          getLLVMContext(),
-          *E->getArg(i)->getIntegerConstantExpr(getContext())));
-    }
+    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
   }
 
   auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
@@ -13814,16 +13787,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   assert(Error == ASTContext::GE_None && "Should not codegen an error");
 
   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
-    // If this is a normal argument, just emit it as a scalar.
-    if ((ICEArguments & (1 << i)) == 0) {
-      Ops.push_back(EmitScalarExpr(E->getArg(i)));
-      continue;
-    }
-
-    // If this is required to be a constant, constant fold it so that we know
-    // that the generated intrinsic gets a ConstantInt.
-    Ops.push_back(llvm::ConstantInt::get(
-        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
+    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
   }
 
   // These exist so that the builtin that takes an immediate can be bounds
@@ -17588,6 +17552,23 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
   SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
 }
 
+llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
+                                                          unsigned Idx,
+                                                          const CallExpr *E) {
+  llvm::Value *Arg = nullptr;
+  if ((ICEArguments & (1 << Idx)) == 0) {
+    Arg = EmitScalarExpr(E->getArg(Idx));
+  } else {
+    // If this is required to be a constant, constant fold it so that we
+    // know that the generated intrinsic gets a ConstantInt.
+    std::optional<llvm::APSInt> Result =
+        E->getArg(Idx)->getIntegerConstantExpr(getContext());
+    assert(Result && "Expected argument to be a constant");
+    Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
+  }
+  return Arg;
+}
+
 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
   llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
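For context, a minimal sketch of how a target-specific emitter is expected to call the new helper when collecting call operands. It mirrors the call sites updated in this patch; Ops, E, BuiltinID, and the surrounding loop follow the existing code and are not new API:

    // Sketch, not part of the patch: fetch the ICE mask once, then let the
    // helper decide per argument between a plain scalar and a folded constant.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");
    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i)
      Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));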
@@ -17638,8 +17619,15 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_mov_dpp:
   case AMDGPU::BI__builtin_amdgcn_update_dpp: {
     llvm::SmallVector<llvm::Value *, 6> Args;
-    for (unsigned I = 0; I != E->getNumArgs(); ++I)
-      Args.push_back(EmitScalarExpr(E->getArg(I)));
+    // Find out if any arguments are required to be integer constant
+    // expressions.
+    unsigned ICEArguments = 0;
+    ASTContext::GetBuiltinTypeError Error;
+    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+    assert(Error == ASTContext::GE_None && "Should not codegen an error");
+    for (unsigned I = 0; I != E->getNumArgs(); ++I) {
+      Args.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, I, E));
+    }
     assert(Args.size() == 5 || Args.size() == 6);
     if (Args.size() == 5)
       Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
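The update_dpp hunk above is the motivating case: the builtin signature marks the trailing control operands of __builtin_amdgcn_update_dpp (dpp_ctrl, row_mask, bank_mask, bound_ctrl) as integer constant expressions, so the emitter must hand the intrinsic ConstantInts rather than plain scalars. A rough source-level illustration, not taken from this patch and with arbitrary operand values:

    // Illustration only: the last four operands are required to be integer
    // constant expressions, which EmitScalarOrConstFoldImmArg folds for us.
    int shifted = __builtin_amdgcn_update_dpp(/*old=*/0, /*src=*/v,
                                              /*dpp_ctrl=*/0x111, /*row_mask=*/0xf,
                                              /*bank_mask=*/0xf, /*bound_ctrl=*/false);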
@@ -20615,17 +20603,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
       Ops.push_back(AggValue);
       continue;
     }
-
-    // If this is a normal argument, just emit it as a scalar.
-    if ((ICEArguments & (1 << i)) == 0) {
-      Ops.push_back(EmitScalarExpr(E->getArg(i)));
-      continue;
-    }
-
-    // If this is required to be a constant, constant fold it so that we know
-    // that the generated intrinsic gets a ConstantInt.
-    Ops.push_back(llvm::ConstantInt::get(
-        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
+    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
   }
 
   Intrinsic::ID ID = Intrinsic::not_intrinsic;