
Commit e8973dd

[RISCV] Add the passthru operand for some RVV nomask unary and nullary intrinsics.
The goal is to support tail and mask policy in the RVV builtins. We focus on the IR part first: if the passthru operand is undef, we use tail agnostic; otherwise we use tail undisturbed. My plan is to handle more complex operations in follow-up patches.

Reviewers: frasercrmck

Differential Revision: https://reviews.llvm.org/D118253
1 parent 86bebe1 commit e8973dd
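
To make the rule in the commit message concrete, here is a minimal sketch in the style of the updated tests below. vfsqrt is one of the builtins this patch moves under HasNoMaskPassThru; the intrinsic mangling in the comment is an assumption for illustration, not copied from the patch:

#include <riscv_vector.h>

// The unmasked builtin takes no destination argument, so the new passthru
// operand of the IR intrinsic is filled with undef, which this patch
// defines to mean tail agnostic:
//   call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(
//       <vscale x 2 x float> undef, <vscale x 2 x float> %op1, i64 %vl)
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
  return vfsqrt_v_f32m1(op1, vl);
}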


57 files changed (+2568, -759 lines)

clang/include/clang/Basic/riscv_vector.td

Lines changed: 7 additions & 3 deletions
@@ -1616,6 +1616,7 @@ defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
                                          [["w", "wv"]]>;
 
 // 12.3. Vector Integer Extension
+let HasNoMaskPassThru = true in {
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
 def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
 def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1628,6 +1629,7 @@ let Log2LMUL = [-3, -2, -1, 0] in {
 def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
 def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
 }
+}
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 let HasMask = false, HasPolicy = false in {
@@ -1833,6 +1835,7 @@ defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
 
 // 14.8. Vector Floating-Point Square-Root Instruction
+let HasNoMaskPassThru = true in {
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
 
 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -1842,7 +1845,6 @@ def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
 def vfrec7 : RVVFloatingUnaryVVBuiltin;
 
 // 14.11. Vector Floating-Point MIN/MAX Instructions
-let HasNoMaskPassThru = true in {
 defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
@@ -1865,7 +1867,7 @@ defm vmfge : RVVFloatingMaskOutBuiltinSet;
 }
 
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", HasPolicy = false in
+let Name = "vfclass_v", HasNoMaskPassThru = true in
 def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
 
 // 14.15. Vector Floating-Point Merge Instruction
@@ -1887,6 +1889,7 @@ let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
                                                   [["f", "v", "ve"]]>;
 
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+let HasNoMaskPassThru = true in {
 def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
 def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
 def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
@@ -1916,6 +1919,7 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
 def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
 def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
 }
+}
 
 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
@@ -1981,7 +1985,7 @@ def vmsif : RVVMaskUnaryBuiltin;
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
 
-let HasNoMaskedOverloaded = false in {
+let HasNoMaskPassThru = true, HasNoMaskedOverloaded = false in {
 // 16.8. Vector Iota Instruction
 defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;

clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c

Lines changed: 18 additions & 18 deletions
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -16,7 +16,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
@@ -25,7 +25,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
@@ -34,7 +34,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
@@ -43,7 +43,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
@@ -52,7 +52,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
@@ -61,7 +61,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
@@ -70,7 +70,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
@@ -79,7 +79,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
@@ -88,7 +88,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -98,7 +98,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -108,7 +108,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -118,7 +118,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -128,7 +128,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -138,7 +138,7 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -148,7 +148,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -158,7 +158,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -168,7 +168,7 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
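
The masked tests above change only by the new trailing i64 0 operand. A short sketch of what it encodes; the 0/1 meaning is an assumption based on the commit message's tail-policy rule, since this diff only ever passes 0:

#include <riscv_vector.h>

// Masked builtins already carry an explicit maskedoff (passthru) vector, so
// they gain a policy operand instead of an undef passthru:
//   i64 0 - tail undisturbed: elements past vl come from maskedoff
//   i64 1 - tail agnostic: elements past vl may hold any value
// (example_masked is a hypothetical wrapper, not part of the patch)
vuint32m1_t example_masked(vbool32_t mask, vuint32m1_t maskedoff,
                           vfloat32m1_t op1, size_t vl) {
  // lowers to @llvm.riscv.vfclass.mask.nxv2f32.i64(..., i64 %vl, i64 0)
  return vfclass_v_u32m1_m(mask, maskedoff, op1, vl);
}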
