@@ -38,7 +38,7 @@ define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64(<vscale x 1
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
- ; CHECK-NEXT: vclmulh.vv v8, v9, v10
+ ; CHECK-NEXT: vclmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
@@ -85,7 +85,7 @@ define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64(<vscale x 2
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
- ; CHECK-NEXT: vclmulh.vv v8, v10, v12
+ ; CHECK-NEXT: vclmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
@@ -132,7 +132,7 @@ define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64(<vscale x 4
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
- ; CHECK-NEXT: vclmulh.vv v8, v12, v16
+ ; CHECK-NEXT: vclmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
@@ -180,7 +180,7 @@ define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64(<vscale x 8
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
- ; CHECK-NEXT: vclmulh.vv v8, v16, v24
+ ; CHECK-NEXT: vclmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
@@ -244,14 +244,14 @@ define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vx_nxv1i64_i64(<vscale x 1 x i
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
- ; RV32-NEXT: vclmulh.vv v8, v9, v10
+ ; RV32-NEXT: vclmulh.vv v8, v9, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
- ; RV64-NEXT: vclmulh.vx v8, v9, a0
+ ; RV64-NEXT: vclmulh.vx v8, v9, a0, v0.t
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
@@ -315,14 +315,14 @@ define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vx_nxv2i64_i64(<vscale x 2 x i
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
- ; RV32-NEXT: vclmulh.vv v8, v10, v12
+ ; RV32-NEXT: vclmulh.vv v8, v10, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
- ; RV64-NEXT: vclmulh.vx v8, v10, a0
+ ; RV64-NEXT: vclmulh.vx v8, v10, a0, v0.t
; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
@@ -386,14 +386,14 @@ define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vx_nxv4i64_i64(<vscale x 4 x i
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
- ; RV32-NEXT: vclmulh.vv v8, v12, v16
+ ; RV32-NEXT: vclmulh.vv v8, v12, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
- ; RV64-NEXT: vclmulh.vx v8, v12, a0
+ ; RV64-NEXT: vclmulh.vx v8, v12, a0, v0.t
; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
@@ -457,14 +457,14 @@ define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vx_nxv8i64_i64(<vscale x 8 x i
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
- ; RV32-NEXT: vclmulh.vv v8, v16, v24
+ ; RV32-NEXT: vclmulh.vv v8, v16, v24, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
- ; RV64-NEXT: vclmulh.vx v8, v16, a0
+ ; RV64-NEXT: vclmulh.vx v8, v16, a0, v0.t
; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(