Clang: Add nsz to llvm.minnum and llvm.maxnum emitted from fmin and fmax #113133
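IEEE-754's minNum and maxNum compare +0.0 and -0.0 as equal, and which zero they return is unspecified, so the llvm.minnum and llvm.maxnum calls Clang emits for fmin/fmax can carry the nsz (no signed zeros) fast-math flag without changing observable behavior. A minimal sketch of the intended effect on the emitted IR (function and value names are illustrative; the authoritative patterns are in the updated tests below):

```c
float max_example(float a, float b) {
  return __builtin_fmaxf(a, b);
}
// Before this change: %0 = call float @llvm.maxnum.f32(float %a, float %b)
// After this change:  %0 = call nsz float @llvm.maxnum.f32(float %a, float %b)
```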

Status: Open. This pull request wants to merge 4 commits into main.

51 changes: 34 additions & 17 deletions clang/lib/CodeGen/CGBuiltin.cpp
@@ -574,20 +574,25 @@ Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
 
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                                  const CallExpr *E, unsigned IntrinsicID,
-                                                  unsigned ConstrainedIntrinsicID) {
+static Value *emitBinaryMaybeConstrainedFPBuiltin(
+    CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID,
+    unsigned ConstrainedIntrinsicID,
+    llvm::FastMathFlags FMF = llvm::FastMathFlags()) {
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
   llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
 
   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+  CallInst *CI;
   if (CGF.Builder.getIsFPConstrained()) {
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+    CI = CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
   } else {
     Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
-    return CGF.Builder.CreateCall(F, { Src0, Src1 });
+    CI = CGF.Builder.CreateCall(F, {Src0, Src1});
   }
+  if (isa<FPMathOperator>(CI))
+    CI->setFastMathFlags(FMF);
+  return CI;
 }
 
 // Has second type mangled argument.
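The new llvm::FastMathFlags parameter defaults to an empty flag set, so callers that pass nothing are unaffected; setFastMathFlags is guarded by isa<FPMathOperator> because fast-math flags can only be attached to calls that return a floating-point value (or a vector of them). A hedged sketch of a caller opting in, mirroring the fmax case below (assumes the surrounding CodeGenFunction::EmitBuiltinExpr context):

```cpp
// Build a flag set carrying only nsz and thread it through the helper;
// the default (empty) FMF leaves every other call site exactly as before.
llvm::FastMathFlags FMF;
FMF.setNoSignedZeros();
Value *V = emitBinaryMaybeConstrainedFPBuiltin(
    *this, E, Intrinsic::maxnum,
    Intrinsic::experimental_constrained_maxnum, FMF);
```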
@@ -2612,10 +2617,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fmaxf:
   case Builtin::BI__builtin_fmaxf16:
   case Builtin::BI__builtin_fmaxl:
-  case Builtin::BI__builtin_fmaxf128:
-    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::maxnum,
-                                   Intrinsic::experimental_constrained_maxnum));
+  case Builtin::BI__builtin_fmaxf128: {
+    llvm::FastMathFlags FMF;
+    FMF.setNoSignedZeros();
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+        *this, E, Intrinsic::maxnum,
+        Intrinsic::experimental_constrained_maxnum, FMF));
+  }
 
   case Builtin::BIfmin:
   case Builtin::BIfminf:
@@ -2624,10 +2632,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fminf:
   case Builtin::BI__builtin_fminf16:
   case Builtin::BI__builtin_fminl:
-  case Builtin::BI__builtin_fminf128:
-    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::minnum,
-                                   Intrinsic::experimental_constrained_minnum));
+  case Builtin::BI__builtin_fminf128: {
+    llvm::FastMathFlags FMF;
+    FMF.setNoSignedZeros();
+    return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+        *this, E, Intrinsic::minnum,
+        Intrinsic::experimental_constrained_minnum, FMF));
+  }
 
   case Builtin::BIfmaximum_num:
   case Builtin::BIfmaximum_numf:
@@ -3798,8 +3809,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
       Result = Builder.CreateBinaryIntrinsic(
           Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
           Op1, nullptr, "elt.max");
-    } else
-      Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
+    } else {
+      FastMathFlags FMF;
+      FMF.setNoSignedZeros(true);
+      Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/FMF, "elt.max");
+    }
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_elementwise_min: {
@@ -3813,8 +3827,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
       Result = Builder.CreateBinaryIntrinsic(
           Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
           Op1, nullptr, "elt.min");
-    } else
-      Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
+    } else {
+      FastMathFlags FMF;
+      FMF.setNoSignedZeros(true);
+      Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/FMF, "elt.min");
+    }
     return RValue::get(Result);
   }
 
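The elementwise min/max path gets the same treatment by handing an nsz-only flag set to CreateMaxNum/CreateMinNum as the FMF source, so scalar and vector overloads both pick up the flag. A hedged usage sketch (the vector typedef follows the style of the tests below):

```c
typedef float float4 __attribute__((ext_vector_type(4)));

float4 vmax(float4 a, float4 b) {
  // Expected to lower to:
  //   call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
  return __builtin_elementwise_max(a, b);
}
```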
24 changes: 12 additions & 12 deletions clang/test/CodeGen/RISCV/math-builtins.c
@@ -134,22 +134,22 @@ long double truncl(long double);
 // RV32-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV32-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV32-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV32-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV32-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV32-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV32-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV32-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV32-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV32-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV32-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV32-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV32-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV32-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
 // RV64-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV64-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV64-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV64-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV64-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV64-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV64-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV64-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV64-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV64-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV64-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV64-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV64-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV64-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
24 changes: 12 additions & 12 deletions clang/test/CodeGen/builtins-elementwise-math.c
@@ -347,21 +347,21 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
 // CHECK-LABEL: define void @test_builtin_elementwise_max(
 // CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
 // CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-// CHECK-NEXT: call float @llvm.maxnum.f32(float [[F1]], float [[F2]])
+// CHECK-NEXT: call nsz float @llvm.maxnum.f32(float [[F1]], float [[F2]])
 f1 = __builtin_elementwise_max(f1, f2);
 
 // CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
 // CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.maxnum.f64(double [[D1]], double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double [[D1]], double [[D2]])
 d1 = __builtin_elementwise_max(d1, d2);
 
 // CHECK: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
 d1 = __builtin_elementwise_max(20.0, d2);
 
 // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
 // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
 vf1 = __builtin_elementwise_max(vf1, vf2);
 
 // CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -404,13 +404,13 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
 
 // CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
 // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
 const float4 cvf1 = vf1;
 vf1 = __builtin_elementwise_max(cvf1, vf2);
 
 // CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
 // CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
 vf1 = __builtin_elementwise_max(vf2, cvf1);
 
 // CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
@@ -431,21 +431,21 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
 // CHECK-LABEL: define void @test_builtin_elementwise_min(
 // CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
 // CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
-// CHECK-NEXT: call float @llvm.minnum.f32(float [[F1]], float [[F2]])
+// CHECK-NEXT: call nsz float @llvm.minnum.f32(float [[F1]], float [[F2]])
 f1 = __builtin_elementwise_min(f1, f2);
 
 // CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
 // CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
-// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double [[D2]])
+// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double [[D2]])
 d1 = __builtin_elementwise_min(d1, d2);
 
 // CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
-// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
+// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
 d1 = __builtin_elementwise_min(d1, 2.0);
 
 // CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
 // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
 vf1 = __builtin_elementwise_min(vf1, vf2);
 
 // CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -495,13 +495,13 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
 
 // CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
 // CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
 const float4 cvf1 = vf1;
 vf1 = __builtin_elementwise_min(cvf1, vf2);
 
 // CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
 // CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
-// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
+// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
 vf1 = __builtin_elementwise_min(vf2, cvf1);
 
 // CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
12 changes: 6 additions & 6 deletions clang/test/CodeGen/builtins.c
@@ -343,22 +343,22 @@ void test_float_builtin_ops(float F, double D, long double LD, int I) {
 // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
 
 resf = __builtin_fminf(F, F);
-// CHECK: call float @llvm.minnum.f32
+// CHECK: call nsz float @llvm.minnum.f32
 
 resd = __builtin_fmin(D, D);
-// CHECK: call double @llvm.minnum.f64
+// CHECK: call nsz double @llvm.minnum.f64
 
 resld = __builtin_fminl(LD, LD);
-// CHECK: call x86_fp80 @llvm.minnum.f80
+// CHECK: call nsz x86_fp80 @llvm.minnum.f80
 
 resf = __builtin_fmaxf(F, F);
-// CHECK: call float @llvm.maxnum.f32
+// CHECK: call nsz float @llvm.maxnum.f32
 
 resd = __builtin_fmax(D, D);
-// CHECK: call double @llvm.maxnum.f64
+// CHECK: call nsz double @llvm.maxnum.f64
 
 resld = __builtin_fmaxl(LD, LD);
-// CHECK: call x86_fp80 @llvm.maxnum.f80
+// CHECK: call nsz x86_fp80 @llvm.maxnum.f80
 
 resf = __builtin_fminimum_numf(F, F);
 // CHECK: call float @llvm.minimumnum.f32
16 changes: 8 additions & 8 deletions clang/test/CodeGen/constrained-math-builtins.c
@@ -123,17 +123,17 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
 
 __builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);
 
-// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
 
 __builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f); __builtin_fminf128(f,f);
 
-// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
-// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
+// CHECK: call nsz fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
 
 __builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f); __builtin_llrintf128(f);
 