Skip to content

Commit 07a27ea

Browse files
author
git apple-llvm automerger
committed
Merge commit '7247f80eeaa7' from apple/main into swift/next
2 parents 3d1794d + 7247f80 commit 07a27ea

File tree

7 files changed

+101
-101
lines changed

7 files changed

+101
-101
lines changed

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1952,7 +1952,7 @@ static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
19521952

19531953
// For simplicity we reuse the vtype representation here.
19541954
MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
1955-
/*TailAgnostic*/ false,
1955+
/*TailAgnostic*/ true,
19561956
/*MaskAgnostic*/ false));
19571957

19581958
// Remove (now) redundant operands from pseudo

llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,23 +40,23 @@ body: |
4040

4141
# POST-INSERTER: %0:gpr = COPY $x13
4242
# POST-INSERTER: %4:vr = IMPLICIT_DEF
43-
# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
43+
# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
4444
# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
4545
# POST-INSERTER: %6:vr = IMPLICIT_DEF
46-
# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
46+
# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
4747
# POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
4848
# POST-INSERTER: %8:vr = IMPLICIT_DEF
49-
# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
49+
# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
5050
# POST-INSERTER: %9:vr = PseudoVADD_VV_M1 %8, killed %5, killed %7, $noreg, $noreg, -1, implicit $vl, implicit $vtype
51-
# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
51+
# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
5252
# POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
5353

54-
# CODEGEN: vsetvli a4, a3, e64,m1,tu,mu
54+
# CODEGEN: vsetvli a4, a3, e64,m1,ta,mu
5555
# CODEGEN-NEXT: vle64.v v25, (a1)
56-
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
56+
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
5757
# CODEGEN-NEXT: vle64.v v26, (a2)
58-
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
58+
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
5959
# CODEGEN-NEXT: vadd.vv v25, v25, v26
60-
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
60+
# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
6161
# CODEGEN-NEXT: vse64.v v25, (a0)
6262
# CODEGEN-NEXT: ret

llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -29,13 +29,13 @@ define void @vadd_vint64m1(
2929
; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
3030

3131
; POST-INSERTER: %4:vr = IMPLICIT_DEF
32-
; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
32+
; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
3333
; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
3434
; POST-INSERTER: %6:vr = IMPLICIT_DEF
35-
; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
35+
; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
3636
; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
3737
; POST-INSERTER: %8:vr = IMPLICIT_DEF
38-
; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
38+
; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
3939
; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $noreg, -1, implicit $vl, implicit $vtype
40-
; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
40+
; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
4141
; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)

llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77
define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
88
; CHECK-LABEL: vadd_vint16m1:
99
; CHECK: # %bb.0:
10-
; CHECK-NEXT: vsetvli a3, zero, e16,m1,tu,mu
10+
; CHECK-NEXT: vsetvli a3, zero, e16,m1,ta,mu
1111
; CHECK-NEXT: vle16.v v25, (a1)
12-
; CHECK-NEXT: vsetvli a1, zero, e16,m1,tu,mu
12+
; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
1313
; CHECK-NEXT: vle16.v v26, (a2)
14-
; CHECK-NEXT: vsetvli a1, zero, e16,m1,tu,mu
14+
; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
1515
; CHECK-NEXT: vadd.vv v25, v25, v26
16-
; CHECK-NEXT: vsetvli a1, zero, e16,m1,tu,mu
16+
; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
1717
; CHECK-NEXT: vse16.v v25, (a0)
1818
; CHECK-NEXT: ret
1919
%va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vs
2626
define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
2727
; CHECK-LABEL: vadd_vint16m2:
2828
; CHECK: # %bb.0:
29-
; CHECK-NEXT: vsetvli a3, zero, e16,m2,tu,mu
29+
; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
3030
; CHECK-NEXT: vle16.v v26, (a1)
31-
; CHECK-NEXT: vsetvli a1, zero, e16,m2,tu,mu
31+
; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
3232
; CHECK-NEXT: vle16.v v28, (a2)
33-
; CHECK-NEXT: vsetvli a1, zero, e16,m2,tu,mu
33+
; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
3434
; CHECK-NEXT: vadd.vv v26, v26, v28
35-
; CHECK-NEXT: vsetvli a1, zero, e16,m2,tu,mu
35+
; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
3636
; CHECK-NEXT: vse16.v v26, (a0)
3737
; CHECK-NEXT: ret
3838
%va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vs
4545
define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
4646
; CHECK-LABEL: vadd_vint16m4:
4747
; CHECK: # %bb.0:
48-
; CHECK-NEXT: vsetvli a3, zero, e16,m4,tu,mu
48+
; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
4949
; CHECK-NEXT: vle16.v v28, (a1)
50-
; CHECK-NEXT: vsetvli a1, zero, e16,m4,tu,mu
50+
; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
5151
; CHECK-NEXT: vle16.v v8, (a2)
52-
; CHECK-NEXT: vsetvli a1, zero, e16,m4,tu,mu
52+
; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
5353
; CHECK-NEXT: vadd.vv v28, v28, v8
54-
; CHECK-NEXT: vsetvli a1, zero, e16,m4,tu,mu
54+
; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
5555
; CHECK-NEXT: vse16.v v28, (a0)
5656
; CHECK-NEXT: ret
5757
%va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <
6464
define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
6565
; CHECK-LABEL: vadd_vint16m8:
6666
; CHECK: # %bb.0:
67-
; CHECK-NEXT: vsetvli a3, zero, e16,m8,tu,mu
67+
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
6868
; CHECK-NEXT: vle16.v v8, (a1)
69-
; CHECK-NEXT: vsetvli a1, zero, e16,m8,tu,mu
69+
; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
7070
; CHECK-NEXT: vle16.v v16, (a2)
71-
; CHECK-NEXT: vsetvli a1, zero, e16,m8,tu,mu
71+
; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
7272
; CHECK-NEXT: vadd.vv v8, v8, v16
73-
; CHECK-NEXT: vsetvli a1, zero, e16,m8,tu,mu
73+
; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
7474
; CHECK-NEXT: vse16.v v8, (a0)
7575
; CHECK-NEXT: ret
7676
%va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
@@ -83,13 +83,13 @@ define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <
8383
define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <vscale x 2 x i16> *%pb) nounwind {
8484
; CHECK-LABEL: vadd_vint16mf2:
8585
; CHECK: # %bb.0:
86-
; CHECK-NEXT: vsetvli a3, zero, e16,mf2,tu,mu
86+
; CHECK-NEXT: vsetvli a3, zero, e16,mf2,ta,mu
8787
; CHECK-NEXT: vle16.v v25, (a1)
88-
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,tu,mu
88+
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
8989
; CHECK-NEXT: vle16.v v26, (a2)
90-
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,tu,mu
90+
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
9191
; CHECK-NEXT: vadd.vv v25, v25, v26
92-
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,tu,mu
92+
; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
9393
; CHECK-NEXT: vse16.v v25, (a0)
9494
; CHECK-NEXT: ret
9595
%va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
@@ -102,13 +102,13 @@ define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <v
102102
define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <vscale x 1 x i16> *%pb) nounwind {
103103
; CHECK-LABEL: vadd_vint16mf4:
104104
; CHECK: # %bb.0:
105-
; CHECK-NEXT: vsetvli a3, zero, e16,mf4,tu,mu
105+
; CHECK-NEXT: vsetvli a3, zero, e16,mf4,ta,mu
106106
; CHECK-NEXT: vle16.v v25, (a1)
107-
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,tu,mu
107+
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
108108
; CHECK-NEXT: vle16.v v26, (a2)
109-
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,tu,mu
109+
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
110110
; CHECK-NEXT: vadd.vv v25, v25, v26
111-
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,tu,mu
111+
; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
112112
; CHECK-NEXT: vse16.v v25, (a0)
113113
; CHECK-NEXT: ret
114114
%va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa

llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77
define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
88
; CHECK-LABEL: vadd_vint32m1:
99
; CHECK: # %bb.0:
10-
; CHECK-NEXT: vsetvli a3, zero, e32,m1,tu,mu
10+
; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu
1111
; CHECK-NEXT: vle32.v v25, (a1)
12-
; CHECK-NEXT: vsetvli a1, zero, e32,m1,tu,mu
12+
; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
1313
; CHECK-NEXT: vle32.v v26, (a2)
14-
; CHECK-NEXT: vsetvli a1, zero, e32,m1,tu,mu
14+
; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
1515
; CHECK-NEXT: vadd.vv v25, v25, v26
16-
; CHECK-NEXT: vsetvli a1, zero, e32,m1,tu,mu
16+
; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
1717
; CHECK-NEXT: vse32.v v25, (a0)
1818
; CHECK-NEXT: ret
1919
%va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vs
2626
define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
2727
; CHECK-LABEL: vadd_vint32m2:
2828
; CHECK: # %bb.0:
29-
; CHECK-NEXT: vsetvli a3, zero, e32,m2,tu,mu
29+
; CHECK-NEXT: vsetvli a3, zero, e32,m2,ta,mu
3030
; CHECK-NEXT: vle32.v v26, (a1)
31-
; CHECK-NEXT: vsetvli a1, zero, e32,m2,tu,mu
31+
; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
3232
; CHECK-NEXT: vle32.v v28, (a2)
33-
; CHECK-NEXT: vsetvli a1, zero, e32,m2,tu,mu
33+
; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
3434
; CHECK-NEXT: vadd.vv v26, v26, v28
35-
; CHECK-NEXT: vsetvli a1, zero, e32,m2,tu,mu
35+
; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
3636
; CHECK-NEXT: vse32.v v26, (a0)
3737
; CHECK-NEXT: ret
3838
%va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vs
4545
define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
4646
; CHECK-LABEL: vadd_vint32m4:
4747
; CHECK: # %bb.0:
48-
; CHECK-NEXT: vsetvli a3, zero, e32,m4,tu,mu
48+
; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
4949
; CHECK-NEXT: vle32.v v28, (a1)
50-
; CHECK-NEXT: vsetvli a1, zero, e32,m4,tu,mu
50+
; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
5151
; CHECK-NEXT: vle32.v v8, (a2)
52-
; CHECK-NEXT: vsetvli a1, zero, e32,m4,tu,mu
52+
; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
5353
; CHECK-NEXT: vadd.vv v28, v28, v8
54-
; CHECK-NEXT: vsetvli a1, zero, e32,m4,tu,mu
54+
; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
5555
; CHECK-NEXT: vse32.v v28, (a0)
5656
; CHECK-NEXT: ret
5757
%va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vs
6464
define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
6565
; CHECK-LABEL: vadd_vint32m8:
6666
; CHECK: # %bb.0:
67-
; CHECK-NEXT: vsetvli a3, zero, e32,m8,tu,mu
67+
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
6868
; CHECK-NEXT: vle32.v v8, (a1)
69-
; CHECK-NEXT: vsetvli a1, zero, e32,m8,tu,mu
69+
; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
7070
; CHECK-NEXT: vle32.v v16, (a2)
71-
; CHECK-NEXT: vsetvli a1, zero, e32,m8,tu,mu
71+
; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
7272
; CHECK-NEXT: vadd.vv v8, v8, v16
73-
; CHECK-NEXT: vsetvli a1, zero, e32,m8,tu,mu
73+
; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
7474
; CHECK-NEXT: vse32.v v8, (a0)
7575
; CHECK-NEXT: ret
7676
%va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
@@ -83,13 +83,13 @@ define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <
8383
define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
8484
; CHECK-LABEL: vadd_vint32mf2:
8585
; CHECK: # %bb.0:
86-
; CHECK-NEXT: vsetvli a3, zero, e32,mf2,tu,mu
86+
; CHECK-NEXT: vsetvli a3, zero, e32,mf2,ta,mu
8787
; CHECK-NEXT: vle32.v v25, (a1)
88-
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,tu,mu
88+
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
8989
; CHECK-NEXT: vle32.v v26, (a2)
90-
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,tu,mu
90+
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
9191
; CHECK-NEXT: vadd.vv v25, v25, v26
92-
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,tu,mu
92+
; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
9393
; CHECK-NEXT: vse32.v v25, (a0)
9494
; CHECK-NEXT: ret
9595
%va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa

llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77
define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
88
; CHECK-LABEL: vadd_vint64m1:
99
; CHECK: # %bb.0:
10-
; CHECK-NEXT: vsetvli a3, zero, e64,m1,tu,mu
10+
; CHECK-NEXT: vsetvli a3, zero, e64,m1,ta,mu
1111
; CHECK-NEXT: vle64.v v25, (a1)
12-
; CHECK-NEXT: vsetvli a1, zero, e64,m1,tu,mu
12+
; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
1313
; CHECK-NEXT: vle64.v v26, (a2)
14-
; CHECK-NEXT: vsetvli a1, zero, e64,m1,tu,mu
14+
; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
1515
; CHECK-NEXT: vadd.vv v25, v25, v26
16-
; CHECK-NEXT: vsetvli a1, zero, e64,m1,tu,mu
16+
; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
1717
; CHECK-NEXT: vse64.v v25, (a0)
1818
; CHECK-NEXT: ret
1919
%va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
@@ -26,13 +26,13 @@ define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vs
2626
define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
2727
; CHECK-LABEL: vadd_vint64m2:
2828
; CHECK: # %bb.0:
29-
; CHECK-NEXT: vsetvli a3, zero, e64,m2,tu,mu
29+
; CHECK-NEXT: vsetvli a3, zero, e64,m2,ta,mu
3030
; CHECK-NEXT: vle64.v v26, (a1)
31-
; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
31+
; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
3232
; CHECK-NEXT: vle64.v v28, (a2)
33-
; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
33+
; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
3434
; CHECK-NEXT: vadd.vv v26, v26, v28
35-
; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
35+
; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
3636
; CHECK-NEXT: vse64.v v26, (a0)
3737
; CHECK-NEXT: ret
3838
%va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
@@ -45,13 +45,13 @@ define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vs
4545
define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
4646
; CHECK-LABEL: vadd_vint64m4:
4747
; CHECK: # %bb.0:
48-
; CHECK-NEXT: vsetvli a3, zero, e64,m4,tu,mu
48+
; CHECK-NEXT: vsetvli a3, zero, e64,m4,ta,mu
4949
; CHECK-NEXT: vle64.v v28, (a1)
50-
; CHECK-NEXT: vsetvli a1, zero, e64,m4,tu,mu
50+
; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
5151
; CHECK-NEXT: vle64.v v8, (a2)
52-
; CHECK-NEXT: vsetvli a1, zero, e64,m4,tu,mu
52+
; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
5353
; CHECK-NEXT: vadd.vv v28, v28, v8
54-
; CHECK-NEXT: vsetvli a1, zero, e64,m4,tu,mu
54+
; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
5555
; CHECK-NEXT: vse64.v v28, (a0)
5656
; CHECK-NEXT: ret
5757
%va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
@@ -64,13 +64,13 @@ define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vs
6464
define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
6565
; CHECK-LABEL: vadd_vint64m8:
6666
; CHECK: # %bb.0:
67-
; CHECK-NEXT: vsetvli a3, zero, e64,m8,tu,mu
67+
; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
6868
; CHECK-NEXT: vle64.v v8, (a1)
69-
; CHECK-NEXT: vsetvli a1, zero, e64,m8,tu,mu
69+
; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
7070
; CHECK-NEXT: vle64.v v16, (a2)
71-
; CHECK-NEXT: vsetvli a1, zero, e64,m8,tu,mu
71+
; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
7272
; CHECK-NEXT: vadd.vv v8, v8, v16
73-
; CHECK-NEXT: vsetvli a1, zero, e64,m8,tu,mu
73+
; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
7474
; CHECK-NEXT: vse64.v v8, (a0)
7575
; CHECK-NEXT: ret
7676
%va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa

0 commit comments

Comments (0)