
Commit b91b4ec

[RISCV] Add missing mask operand for masked vclmul/vclmulh tests. NFC.
1 parent: 8f1d1e2

2 files changed: +24 −24 lines changed

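For context: the masked RVV intrinsics carry the mask as an explicit argument, and the backend emits the "v0.t" form of the instruction when that argument is a genuine mask. The tests below call the .mask intrinsic variants, so the CHECK lines must expect the v0.t operand. The following is a minimal sketch, not taken from the patch, of what one of these masked tests looks like in full. It assumes the usual RVV masked-intrinsic operand order of (passthru, op1, op2, mask, vl, policy); the declaration, argument names, and the policy value are illustrative.

; Illustrative sketch of a masked vclmul call. The policy value 1
; (tail agnostic, mask undisturbed) is assumed to match the "ta, mu"
; vsetvli in the CHECK lines below.
declare <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>,
  <vscale x 1 x i1>, i64, i64)

define <vscale x 1 x i64> @sketch_masked_vclmul(<vscale x 1 x i64> %passthru,
                                                <vscale x 1 x i64> %a,
                                                <vscale x 1 x i64> %b,
                                                <vscale x 1 x i1> %mask,
                                                i64 %vl) {
  ; Passing a real %mask is what selects the masked instruction form,
  ; i.e. "vclmul.vv v8, v9, v10, v0.t" -- exactly the operand the
  ; updated CHECK-NEXT lines now require.
  %r = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %passthru, <vscale x 1 x i64> %a,
    <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
  ret <vscale x 1 x i64> %r
}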

llvm/test/CodeGen/RISCV/rvv/vclmul.ll

Lines changed: 12 additions & 12 deletions
@@ -38,7 +38,7 @@ define <vscale x 1 x i64> @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vclmul.vv v8, v9, v10
+; CHECK-NEXT: vclmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
@@ -85,7 +85,7 @@ define <vscale x 2 x i64> @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vclmul.vv v8, v10, v12
+; CHECK-NEXT: vclmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64(
@@ -132,7 +132,7 @@ define <vscale x 4 x i64> @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vclmul.vv v8, v12, v16
+; CHECK-NEXT: vclmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64(
@@ -180,7 +180,7 @@ define <vscale x 8 x i64> @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vclmul.vv v8, v16, v24
+; CHECK-NEXT: vclmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64(
@@ -244,14 +244,14 @@ define <vscale x 1 x i64> @intrinsic_vclmul_mask_vx_nxv1i64_i64(<vscale x 1 x i6
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vclmul.vv v8, v9, v10
+; RV32-NEXT: vclmul.vv v8, v9, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; RV64-NEXT: vclmul.vx v8, v9, a0
+; RV64-NEXT: vclmul.vx v8, v9, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64(
@@ -315,14 +315,14 @@ define <vscale x 2 x i64> @intrinsic_vclmul_mask_vx_nxv2i64_i64(<vscale x 2 x i6
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vclmul.vv v8, v10, v12
+; RV32-NEXT: vclmul.vv v8, v10, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; RV64-NEXT: vclmul.vx v8, v10, a0
+; RV64-NEXT: vclmul.vx v8, v10, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64(
@@ -386,14 +386,14 @@ define <vscale x 4 x i64> @intrinsic_vclmul_mask_vx_nxv4i64_i64(<vscale x 4 x i6
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vclmul.vv v8, v12, v16
+; RV32-NEXT: vclmul.vv v8, v12, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; RV64-NEXT: vclmul.vx v8, v12, a0
+; RV64-NEXT: vclmul.vx v8, v12, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64(
@@ -457,14 +457,14 @@ define <vscale x 8 x i64> @intrinsic_vclmul_mask_vx_nxv8i64_i64(<vscale x 8 x i6
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vclmul.vv v8, v16, v24
+; RV32-NEXT: vclmul.vv v8, v16, v24, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; RV64-NEXT: vclmul.vx v8, v16, a0
+; RV64-NEXT: vclmul.vx v8, v16, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64(

llvm/test/CodeGen/RISCV/rvv/vclmulh.ll

Lines changed: 12 additions & 12 deletions
@@ -38,7 +38,7 @@ define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64(<vscale x 1
 ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vclmulh.vv v8, v9, v10
+; CHECK-NEXT: vclmulh.vv v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
@@ -85,7 +85,7 @@ define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64(<vscale x 2
 ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vclmulh.vv v8, v10, v12
+; CHECK-NEXT: vclmulh.vv v8, v10, v12, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
@@ -132,7 +132,7 @@ define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64(<vscale x 4
 ; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vclmulh.vv v8, v12, v16
+; CHECK-NEXT: vclmulh.vv v8, v12, v16, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
@@ -180,7 +180,7 @@ define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64(<vscale x 8
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vclmulh.vv v8, v16, v24
+; CHECK-NEXT: vclmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
@@ -244,14 +244,14 @@ define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vx_nxv1i64_i64(<vscale x 1 x i
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
 ; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vclmulh.vv v8, v9, v10
+; RV32-NEXT: vclmulh.vv v8, v9, v10, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; RV64-NEXT: vclmulh.vx v8, v9, a0
+; RV64-NEXT: vclmulh.vx v8, v9, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
@@ -315,14 +315,14 @@ define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vx_nxv2i64_i64(<vscale x 2 x i
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
 ; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vclmulh.vv v8, v10, v12
+; RV32-NEXT: vclmulh.vv v8, v10, v12, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; RV64-NEXT: vclmulh.vx v8, v10, a0
+; RV64-NEXT: vclmulh.vx v8, v10, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
@@ -386,14 +386,14 @@ define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vx_nxv4i64_i64(<vscale x 4 x i
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
 ; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vclmulh.vv v8, v12, v16
+; RV32-NEXT: vclmulh.vv v8, v12, v16, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; RV64-NEXT: vclmulh.vx v8, v12, a0
+; RV64-NEXT: vclmulh.vx v8, v12, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
@@ -457,14 +457,14 @@ define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vx_nxv8i64_i64(<vscale x 8 x i
 ; RV32-NEXT: addi a0, sp, 8
 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
 ; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vclmulh.vv v8, v16, v24
+; RV32-NEXT: vclmulh.vv v8, v16, v24, v0.t
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
 ; RV64: # %bb.0: # %entry
 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; RV64-NEXT: vclmulh.vx v8, v16, a0
+; RV64-NEXT: vclmulh.vx v8, v16, a0, v0.t
 ; RV64-NEXT: ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
