[RISCV] Support Xsfvfnrclipxfqf extensions #68297

Merged: 1 commit, Nov 3, 2023

62 changes: 62 additions & 0 deletions clang/include/clang/Basic/riscv_sifive_vector.td
@@ -120,6 +120,14 @@ multiclass RVVVQMACCBuiltinSet<list<list<string>> suffixes_prototypes> {
defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "i", suffixes_prototypes>;
}

multiclass RVVVFNRCLIPBuiltinSet<string suffix, string prototype, string type_range> {
let Log2LMUL = [-3, -2, -1, 0, 1, 2],
Name = NAME,
IRName = NAME,
MaskedIRName = NAME # "_mask" in
def : RVVConvBuiltin<suffix, prototype, type_range, NAME>;
}

let UnMaskedPolicyScheme = HasPolicyOperand in
let RequiredFeatures = ["Xsfvqmaccdod"] in {
defm sf_vqmaccu_2x8x2 : RVVVQMACCBuiltinSet<[["", "v", "vv(FixedSEW:8)SUv(FixedSEW:8)Uv"]]>;
@@ -139,3 +147,57 @@ let UnMaskedPolicyScheme = HasPolicyOperand in
let UnMaskedPolicyScheme = HasPolicyOperand in
let RequiredFeatures = ["Xsfvfwmaccqqq"] in
defm sf_vfwmacc_4x4x4 : RVVVFWMACCBuiltinSet<[["", "w", "wwSvv"]]>;

let UnMaskedPolicyScheme = HasPassthruOperand, RequiredFeatures = ["Xsfvfnrclipxfqf"] in {
let ManualCodegen = [{
{
// LLVM intrinsic
// Unmasked: (passthru, vector_in, scalar_in, frm, vl)
// Masked: (passthru, vector_in, scalar_in, mask, frm, vl, policy)

SmallVector<llvm::Value*, 7> Operands;
bool HasMaskedOff = !(
(IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
(!IsMasked && PolicyAttrs & RVV_VTA));
bool HasRoundModeOp = IsMasked ?
(HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
(HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);

unsigned Offset = IsMasked ?
(HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);

if (!HasMaskedOff)
Operands.push_back(llvm::PoisonValue::get(ResultType));
else
Operands.push_back(Ops[IsMasked ? 1 : 0]);

Operands.push_back(Ops[Offset]); // op0
Operands.push_back(Ops[Offset + 1]); // op1

if (IsMasked)
Operands.push_back(Ops[0]); // mask

if (HasRoundModeOp) {
Operands.push_back(Ops[Offset + 2]); // frm
Operands.push_back(Ops[Offset + 3]); // vl
} else {
Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
Operands.push_back(Ops[Offset + 2]); // vl
}

if (IsMasked)
Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Operands.back()->getType()};
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
return Builder.CreateCall(F, Operands, "");
}
}] in {
let HasFRMRoundModeOp = true in {
defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqfu", "c">;
defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqfu", "c">;
}
defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqf", "c">;
defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqf", "c">;
}
}
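
For orientation, here is a minimal usage sketch of the builtin flavors generated by the four defm lines above (signatures taken from the codegen tests further down; i8mf8 is just one of the SEW/LMUL combinations). The plain form passes frm = 7, meaning "use the current rounding mode"; the _rm form takes an explicit rounding-mode operand; each has a masked _m counterpart. Per the operand-layout comment in the ManualCodegen above, these lower to the llvm.riscv.sf.vfnrclip.x.f.qf and .mask intrinsics with (passthru, vector, scalar[, mask], frm, vl[, policy]) operands, as the CHECK lines in the tests confirm.

#include <sifive_vector.h>

// Illustrative only: 2 selects round-down (RDN); without the _rm suffix, frm stays at 7 (dynamic).
vint8mf8_t sf_vfnrclip_demo(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
  vint8mf8_t r0 = __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);                // default rounding
  vint8mf8_t r1 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);          // explicit rounding mode
  vint8mf8_t r2 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl);        // masked, default rounding
  vint8mf8_t r3 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);  // masked, explicit rounding
  (void)r0; (void)r1; (void)r2;                                                  // keep the sketch warning-free
  return r3;
}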
1 change: 1 addition & 0 deletions clang/include/clang/Basic/riscv_vector_common.td
@@ -66,6 +66,7 @@
// t: ptrdiff_t, ignores "t"
// u: unsigned long, ignores "t"
// l: long, ignores "t"
// f: float32, ignores "t"
//
// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
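
As a reading aid (an interpretation of the prototype letters, not text from the patch): in the sf_vfnrclip definitions earlier in this diff, the prototype "vFqf" with type_range "c" decodes roughly as 'v', a signed int8 vector for the return type; 'Fq', a quad-SEW vector of the base type converted to float, i.e. a float32 vector; and the new 'f', a float32 scalar. The vl operand is appended automatically, and the "vFqfu" variant adds an unsigned rounding-mode operand. For one SEW/LMUL combination this yields the signature exercised by the tests below:

vint8mf8_t __riscv_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl);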
26 changes: 14 additions & 12 deletions clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -85,6 +85,7 @@ enum class BaseTypeModifier : uint8_t {
Ptrdiff,
UnsignedLong,
SignedLong,
Float32
};

// Modifier for type, used for both scalar and vector types.
@@ -485,18 +486,19 @@ enum RVVRequire : uint16_t {
RVV_REQ_RV64 = 1 << 0,
RVV_REQ_ZvfhminOrZvfh = 1 << 1,
RVV_REQ_Xsfvcp = 1 << 2,
-RVV_REQ_Xsfvfwmaccqqq = 1 << 3,
-RVV_REQ_Xsfvqmaccdod = 1 << 4,
-RVV_REQ_Xsfvqmaccqoq = 1 << 5,
-RVV_REQ_Zvbb = 1 << 6,
-RVV_REQ_Zvbc = 1 << 7,
-RVV_REQ_Zvkb = 1 << 8,
-RVV_REQ_Zvkg = 1 << 9,
-RVV_REQ_Zvkned = 1 << 10,
-RVV_REQ_Zvknha = 1 << 11,
-RVV_REQ_Zvknhb = 1 << 12,
-RVV_REQ_Zvksed = 1 << 13,
-RVV_REQ_Zvksh = 1 << 14,
+RVV_REQ_Xsfvfnrclipxfqf = 1 << 3,
+RVV_REQ_Xsfvfwmaccqqq = 1 << 4,
+RVV_REQ_Xsfvqmaccdod = 1 << 5,
+RVV_REQ_Xsfvqmaccqoq = 1 << 6,
+RVV_REQ_Zvbb = 1 << 7,
+RVV_REQ_Zvbc = 1 << 8,
+RVV_REQ_Zvkb = 1 << 9,
+RVV_REQ_Zvkg = 1 << 10,
+RVV_REQ_Zvkned = 1 << 11,
+RVV_REQ_Zvknha = 1 << 12,
+RVV_REQ_Zvknhb = 1 << 13,
+RVV_REQ_Zvksed = 1 << 14,
+RVV_REQ_Zvksh = 1 << 15,

LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Zvksh)
};
1 change: 1 addition & 0 deletions clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -205,6 +205,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
{"64bit", RVV_REQ_RV64},
{"xsfvcp", RVV_REQ_Xsfvcp},
{"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
{"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
{"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
{"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
9 changes: 8 additions & 1 deletion clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -429,6 +429,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'l':
PT = BaseTypeModifier::SignedLong;
break;
case 'f':
PT = BaseTypeModifier::Float32;
break;
default:
llvm_unreachable("Illegal primitive type transformers!");
}
@@ -665,6 +668,10 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
case BaseTypeModifier::SignedLong:
ScalarType = ScalarTypeKind::SignedLong;
break;
case BaseTypeModifier::Float32:
ElementBitwidth = 32;
ScalarType = ScalarTypeKind::Float;
break;
case BaseTypeModifier::Invalid:
ScalarType = ScalarTypeKind::Invalid;
return;
@@ -1149,7 +1156,7 @@ void RVVIntrinsic::updateNamesAndPolicy(

SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
SmallVector<PrototypeDescriptor> PrototypeDescriptors;
-const StringRef Primaries("evwqom0ztul");
+const StringRef Primaries("evwqom0ztulf");
while (!Prototypes.empty()) {
size_t Idx = 0;
// Skip over complex prototype because it could contain primitive type
98 changes: 98 additions & 0 deletions (new test file)
@@ -0,0 +1,98 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | \
// RUN: opt -S -passes=mem2reg | FileCheck %s

#include <sifive_vector.h>

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf4_m(mask, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf2_m(mask, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m1_m(mask, vs2, rs1, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m2_m(mask, vs2, rs1, vl);
}

98 changes: 98 additions & 0 deletions (new test file)
@@ -0,0 +1,98 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | \
// RUN: opt -S -passes=mem2reg | FileCheck %s

#include <sifive_vector.h>

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm(vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm(vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm(vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm(vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_m(mask, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_m(mask, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_m(mask, vs2, rs1, 2, vl);
}

// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_m(mask, vs2, rs1, 2, vl);
}
