@@ -5708,7 +5708,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     llvm::FunctionType *FTy = F->getFunctionType();

     for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
-      Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
+      Value *ArgValue;
+      // If this is a normal argument, just emit it as a scalar.
+      if ((ICEArguments & (1 << i)) == 0) {
+        ArgValue = EmitScalarExpr(E->getArg(i));
+      } else {
+        // If this is required to be a constant, constant fold it so that we
+        // know that the generated intrinsic gets a ConstantInt.
+        ArgValue = llvm::ConstantInt::get(
+            getLLVMContext(),
+            *E->getArg(i)->getIntegerConstantExpr(getContext()));
+      }
+
       // If the intrinsic arg type is different from the builtin arg type
       // we need to do a bit cast.
       llvm::Type *PTy = FTy->getParamType(i);
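
Note on the pattern re-inlined above: ICEArguments is a bitmask filled in by
ASTContext::GetBuiltinType, with bit i set when the builtin's i-th argument is
required to be an integer constant expression. Below is a standalone sketch of
that per-argument dispatch; EmitScalar and FoldConstant are hypothetical
stand-ins for EmitScalarExpr and the ConstantInt fold, which need a real
CodeGenFunction.

    #include <cstdio>

    // Hypothetical stand-ins for EmitScalarExpr and the ConstantInt fold.
    static void EmitScalar(unsigned I) { std::printf("arg %u: scalar\n", I); }
    static void FoldConstant(unsigned I) { std::printf("arg %u: ConstantInt\n", I); }

    int main() {
      // Bit i set => argument i must be an integer constant expression,
      // e.g. a builtin whose second argument is an immediate.
      unsigned ICEArguments = 1u << 1;
      for (unsigned i = 0, e = 2; i != e; ++i) {
        if ((ICEArguments & (1u << i)) == 0)
          EmitScalar(i);    // normal argument: emit as a scalar
        else
          FoldConstant(i);  // fold so the intrinsic is guaranteed a constant
      }
    }
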
@@ -8588,7 +8599,15 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
       }
     }

-    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+    } else {
+      // If this is required to be a constant, constant fold it so that we know
+      // that the generated intrinsic gets a ConstantInt.
+      Ops.push_back(llvm::ConstantInt::get(
+          getLLVMContext(),
+          *E->getArg(i)->getIntegerConstantExpr(getContext())));
+    }
   }

   switch (BuiltinID) {
@@ -11075,7 +11094,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
        continue;
      }
    }
-    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+    } else {
+      // If this is required to be a constant, constant fold it so that we know
+      // that the generated intrinsic gets a ConstantInt.
+      Ops.push_back(llvm::ConstantInt::get(
+          getLLVMContext(),
+          *E->getArg(i)->getIntegerConstantExpr(getContext())));
+    }
   }

   auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
@@ -13787,7 +13814,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   assert(Error == ASTContext::GE_None && "Should not codegen an error");

   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
-    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
+    // If this is a normal argument, just emit it as a scalar.
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+      continue;
+    }
+
+    // If this is required to be a constant, constant fold it so that we know
+    // that the generated intrinsic gets a ConstantInt.
+    Ops.push_back(llvm::ConstantInt::get(
+        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
   }

   // These exist so that the builtin that takes an immediate can be bounds
@@ -17552,23 +17588,6 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
   SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
 }

-llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
-                                                          unsigned Idx,
-                                                          const CallExpr *E) {
-  llvm::Value *Arg = nullptr;
-  if ((ICEArguments & (1 << Idx)) == 0) {
-    Arg = EmitScalarExpr(E->getArg(Idx));
-  } else {
-    // If this is required to be a constant, constant fold it so that we
-    // know that the generated intrinsic gets a ConstantInt.
-    std::optional<llvm::APSInt> Result =
-        E->getArg(Idx)->getIntegerConstantExpr(getContext());
-    assert(Result && "Expected argument to be a constant");
-    Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
-  }
-  return Arg;
-}
-
 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
   llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
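
The deleted helper above wrapped the fold in an explicit check:
getIntegerConstantExpr returns std::optional<llvm::APSInt>, and the helper
asserted on it before dereferencing, while the re-inlined call sites
dereference the result directly (Sema has already rejected non-constant
arguments by the time codegen runs). A minimal sketch of the two styles,
assuming plain std::optional<int> in place of llvm::APSInt:

    #include <cassert>
    #include <optional>

    // Hypothetical stand-in for Expr::getIntegerConstantExpr: empty when
    // the expression does not fold to an integer constant.
    static std::optional<int> tryFold(bool IsConstant) {
      if (IsConstant)
        return 42;
      return std::nullopt;
    }

    int main() {
      // Deleted helper's style: assert, then dereference.
      std::optional<int> Result = tryFold(true);
      assert(Result && "Expected argument to be a constant");
      int A = *Result;

      // Inlined call sites' style: dereference directly, relying on Sema
      // having already validated the constant.
      int B = *tryFold(true);

      return A == B ? 0 : 1;
    }
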
@@ -17619,15 +17638,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_mov_dpp:
   case AMDGPU::BI__builtin_amdgcn_update_dpp: {
     llvm::SmallVector<llvm::Value *, 6> Args;
-    // Find out if any arguments are required to be integer constant
-    // expressions.
-    unsigned ICEArguments = 0;
-    ASTContext::GetBuiltinTypeError Error;
-    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
-    assert(Error == ASTContext::GE_None && "Should not codegen an error");
-    for (unsigned I = 0; I != E->getNumArgs(); ++I) {
-      Args.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, I, E));
-    }
+    for (unsigned I = 0; I != E->getNumArgs(); ++I)
+      Args.push_back(EmitScalarExpr(E->getArg(I)));
     assert(Args.size() == 5 || Args.size() == 6);
     if (Args.size() == 5)
       Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
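
Context for the assert/insert pair above: __builtin_amdgcn_mov_dpp carries
five operands and __builtin_amdgcn_update_dpp six, so a poison placeholder for
the missing old-value operand is prepended to the shorter form and both
builtins then share one six-operand emission path. A standalone sketch of that
normalization, using std::vector and a sentinel in place of llvm::PoisonValue:

    #include <cassert>
    #include <vector>

    int main() {
      const int Placeholder = -1; // stand-in for llvm::PoisonValue

      std::vector<int> Args = {10, 20, 30, 40, 50}; // mov_dpp: five operands
      assert(Args.size() == 5 || Args.size() == 6);

      // Prepend a placeholder "old value" so mov_dpp and update_dpp can
      // share the same six-operand call path.
      if (Args.size() == 5)
        Args.insert(Args.begin(), Placeholder);

      assert(Args.size() == 6);
      return 0;
    }
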
@@ -20603,7 +20615,17 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
       Ops.push_back(AggValue);
       continue;
     }
-    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
+
+    // If this is a normal argument, just emit it as a scalar.
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+      continue;
+    }
+
+    // If this is required to be a constant, constant fold it so that we know
+    // that the generated intrinsic gets a ConstantInt.
+    Ops.push_back(llvm::ConstantInt::get(
+        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
   }

   Intrinsic::ID ID = Intrinsic::not_intrinsic;