@@ -4,21 +4,10 @@
 
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme2 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sme2 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme2 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
 
 #include <arm_sme_draft_spec_subject_to_change.h>
 
-#ifdef SVE_OVERLOADED_FORMS
-// A simple used,unused... macro, long enough to represent any SVE builtin.
-#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
-#else
-#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
-#endif
-
-
-
 // CHECK-LABEL: @test_svluti2_lane_zt_u8(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> [[ZN:%.*]], i32 0)
@@ -46,7 +35,7 @@
 // CPP-CHECK-NEXT: ret <vscale x 64 x i8> [[TMP8]]
 //
 svuint8x4_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_u8,_x4,)(0, zn, 0);
+  return svluti2_lane_zt_u8_x4(0, zn, 0);
 }
 
 
@@ -77,7 +66,7 @@ svuint8x4_t test_svluti2_lane_zt_u8(svuint8_t zn) __arm_streaming __arm_shared_z
 // CPP-CHECK-NEXT: ret <vscale x 64 x i8> [[TMP8]]
 //
 svint8x4_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_s8,_x4,)(0, zn, 0);
+  return svluti2_lane_zt_s8_x4(0, zn, 0);
 }
 
 // CHECK-LABEL: @test_svluti2_lane_zt_u16(
@@ -107,7 +96,7 @@ svint8x4_t test_svluti2_lane_zt_s8(svint8_t zn) __arm_streaming __arm_shared_za
 // CPP-CHECK-NEXT: ret <vscale x 32 x i16> [[TMP8]]
 //
 svuint16x4_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_u16,_x4,)(0, zn, 3);
+  return svluti2_lane_zt_u16_x4(0, zn, 3);
 }
 
 // CHECK-LABEL: @test_svluti2_lane_zt_s16(
@@ -137,7 +126,67 @@ svuint16x4_t test_svluti2_lane_zt_u16(svuint16_t zn) __arm_streaming __arm_share
 // CPP-CHECK-NEXT: ret <vscale x 32 x i16> [[TMP8]]
 //
 svint16x4_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_s16,_x4,)(0, zn, 3);
+  return svluti2_lane_zt_s16_x4(0, zn, 3);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 3)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], i64 16)
+// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP6]], <vscale x 8 x half> [[TMP7]], i64 24)
+// CHECK-NEXT: ret <vscale x 32 x half> [[TMP8]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f16u13__SVFloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv8f16(i32 0, <vscale x 8 x half> [[ZN:%.*]], i32 3)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> poison, <vscale x 8 x half> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], i64 16)
+// CPP-CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 32 x half> @llvm.vector.insert.nxv32f16.nxv8f16(<vscale x 32 x half> [[TMP6]], <vscale x 8 x half> [[TMP7]], i64 24)
+// CPP-CHECK-NEXT: ret <vscale x 32 x half> [[TMP8]]
+//
+svfloat16x4_t test_svluti2_lane_zt_f16(svfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+  return svluti2_lane_zt_f16_x4(0, zn, 3);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 3)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 2
+// CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP4]], <vscale x 8 x bfloat> [[TMP5]], i64 16)
+// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 3
+// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP6]], <vscale x 8 x bfloat> [[TMP7]], i64 24)
+// CHECK-NEXT: ret <vscale x 32 x bfloat> [[TMP8]]
+//
+// CPP-CHECK-LABEL: @_Z25test_svluti2_lane_zt_bf16u14__SVBfloat16_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv8bf16(i32 0, <vscale x 8 x bfloat> [[ZN:%.*]], i32 3)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> poison, <vscale x 8 x bfloat> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], i64 8)
+// CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 2
+// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP4]], <vscale x 8 x bfloat> [[TMP5]], i64 16)
+// CPP-CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } [[TMP0]], 3
+// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 32 x bfloat> @llvm.vector.insert.nxv32bf16.nxv8bf16(<vscale x 32 x bfloat> [[TMP6]], <vscale x 8 x bfloat> [[TMP7]], i64 24)
+// CPP-CHECK-NEXT: ret <vscale x 32 x bfloat> [[TMP8]]
+//
+svbfloat16x4_t test_svluti2_lane_zt_bf16(svbfloat16_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+  return svluti2_lane_zt_bf16_x4(0, zn, 3);
 }
 
 // CHECK-LABEL: @test_svluti2_lane_zt_u32(
@@ -167,7 +216,7 @@ svint16x4_t test_svluti2_lane_zt_s16(svint16_t zn) __arm_streaming __arm_shared_
 // CPP-CHECK-NEXT: ret <vscale x 16 x i32> [[TMP8]]
 //
 svuint32x4_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_u32,_x4,)(0, zn, 3);
+  return svluti2_lane_zt_u32_x4(0, zn, 3);
 }
 
 // CHECK-LABEL: @test_svluti2_lane_zt_s32(
@@ -197,5 +246,35 @@ svuint32x4_t test_svluti2_lane_zt_u32(svuint32_t zn) __arm_streaming __arm_share
 // CPP-CHECK-NEXT: ret <vscale x 16 x i32> [[TMP8]]
 //
 svint32x4_t test_svluti2_lane_zt_s32(svint32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
-  return SVE_ACLE_FUNC(svluti2_lane_zt,_s32,_x4,)(0, zn, 3);
+  return svluti2_lane_zt_s32_x4(0, zn, 3);
+}
+
+// CHECK-LABEL: @test_svluti2_lane_zt_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 3)
+// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP4]], <vscale x 4 x float> [[TMP5]], i64 8)
+// CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP6]], <vscale x 4 x float> [[TMP7]], i64 12)
+// CHECK-NEXT: ret <vscale x 16 x float> [[TMP8]]
+//
+// CPP-CHECK-LABEL: @_Z24test_svluti2_lane_zt_f32u13__SVFloat32_t(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sme.luti2.lane.zt.x4.nxv4f32(i32 0, <vscale x 4 x float> [[ZN:%.*]], i32 3)
+// CPP-CHECK-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> poison, <vscale x 4 x float> [[TMP1]], i64 0)
+// CPP-CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], i64 4)
+// CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP4]], <vscale x 4 x float> [[TMP5]], i64 8)
+// CPP-CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[TMP6]], <vscale x 4 x float> [[TMP7]], i64 12)
+// CPP-CHECK-NEXT: ret <vscale x 16 x float> [[TMP8]]
+//
+svfloat32x4_t test_svluti2_lane_zt_f32(svfloat32_t zn) __arm_streaming __arm_shared_za __arm_preserves_za {
+  return svluti2_lane_zt_f32_x4(0, zn, 3);
 }