@@ -1,51 +1,66 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,V
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVE32
 
 define <vscale x 1 x i8> @masked_load_nxv1i8(ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv1i8:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_load_nxv1i8:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; V-NEXT: vle8.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_load_nxv1i8:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; ZVE32-NEXT: vle8.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
   %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
   ret <vscale x 1 x i8> %load
 }
 declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
 
 define <vscale x 1 x i16> @masked_load_nxv1i16(ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv1i16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_load_nxv1i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; V-NEXT: vle16.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_load_nxv1i16:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; ZVE32-NEXT: vle16.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
   %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
   ret <vscale x 1 x i16> %load
 }
 declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
 
 define <vscale x 1 x i32> @masked_load_nxv1i32(ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv1i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0), v0.t
-; CHECK-NEXT: ret
+; V-LABEL: masked_load_nxv1i32:
+; V: # %bb.0:
+; V-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
+; V-NEXT: vle32.v v8, (a0), v0.t
+; V-NEXT: ret
+;
+; ZVE32-LABEL: masked_load_nxv1i32:
+; ZVE32: # %bb.0:
+; ZVE32-NEXT: csrr a1, vlenb
+; ZVE32-NEXT: srli a1, a1, 3
+; ZVE32-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; ZVE32-NEXT: vle32.v v8, (a0), v0.t
+; ZVE32-NEXT: ret
   %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
   ret <vscale x 1 x i32> %load
 }
 declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
 
-define <vscale x 1 x i64> @masked_load_nxv1i64(ptr %a, <vscale x 1 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv1i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
-  ret <vscale x 1 x i64> %load
-}
-declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(ptr, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
-
 define <vscale x 2 x i8> @masked_load_nxv2i8(ptr %a, <vscale x 2 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv2i8:
 ; CHECK: # %bb.0:
@@ -79,17 +94,6 @@ define <vscale x 2 x i32> @masked_load_nxv2i32(ptr %a, <vscale x 2 x i1> %mask)
 }
 declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
 
-define <vscale x 2 x i64> @masked_load_nxv2i64(ptr %a, <vscale x 2 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
-  ret <vscale x 2 x i64> %load
-}
-declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-
 define <vscale x 4 x i8> @masked_load_nxv4i8(ptr %a, <vscale x 4 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv4i8:
 ; CHECK: # %bb.0:
@@ -123,17 +127,6 @@ define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask)
 }
 declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
 
-define <vscale x 4 x i64> @masked_load_nxv4i64(ptr %a, <vscale x 4 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv4i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
-  ret <vscale x 4 x i64> %load
-}
-declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
-
 define <vscale x 8 x i8> @masked_load_nxv8i8(ptr %a, <vscale x 8 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv8i8:
 ; CHECK: # %bb.0:
@@ -167,17 +160,6 @@ define <vscale x 8 x i32> @masked_load_nxv8i32(ptr %a, <vscale x 8 x i1> %mask)
 }
 declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
 
-define <vscale x 8 x i64> @masked_load_nxv8i64(ptr %a, <vscale x 8 x i1> %mask) nounwind {
-; CHECK-LABEL: masked_load_nxv8i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0), v0.t
-; CHECK-NEXT: ret
-  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
-  ret <vscale x 8 x i64> %load
-}
-declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
-
 define <vscale x 16 x i8> @masked_load_nxv16i8(ptr %a, <vscale x 16 x i1> %mask) nounwind {
 ; CHECK-LABEL: masked_load_nxv16i8:
 ; CHECK: # %bb.0: