@@ -20,7 +20,7 @@ define void @extract_row_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 ; CHECK-NEXT: mov z5.b, p0/m, za0h.b[w12, 10]
 ; CHECK-NEXT: mov z6.b, p0/m, za0h.b[w12, 12]
 ; CHECK-NEXT: mov z7.b, p0/m, za0h.b[w12, 14]
-; CHECK-NEXT: b dummy_use_8_nxv16i8
+; CHECK-NEXT: b use
   %z0 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.2 = add i32 %tileslice, 2
   %z1 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.2)
@@ -38,8 +38,8 @@ define void @extract_row_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
   %z7 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.14)

   ; Force retention of z0..z7
-  tail call void @dummy_use_8_nxv16i8(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
-                                      <vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
+  tail call void @use(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
+                      <vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
   ret void
 }

@@ -62,7 +62,7 @@ define void @extract_col_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 ; CHECK-NEXT: mov z5.b, p0/m, za0v.b[w12, 11]
 ; CHECK-NEXT: mov z6.b, p0/m, za0v.b[w12, 13]
 ; CHECK-NEXT: mov z7.b, p0/m, za0v.b[w12, 15]
-; CHECK-NEXT: b dummy_use_8_nxv16i8
+; CHECK-NEXT: b use
   %tileslice.1 = add i32 %tileslice, 1
   %z0 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.vert.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.1)
   %tileslice.3 = add i32 %tileslice, 3
@@ -80,8 +80,8 @@ define void @extract_col_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
   %tileslice.15 = add i32 %tileslice, 15
   %z7 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.vert.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.15)

-  tail call void @dummy_use_8_nxv16i8(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
-                                      <vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
+  tail call void @use(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
+                      <vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
   ret void
 }

@@ -96,7 +96,7 @@ define void @extract_row_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.h, p0/m, za0h.h[w12, 2]
 ; CHECK-NEXT: mov z2.h, p0/m, za0h.h[w12, 4]
 ; CHECK-NEXT: mov z3.h, p0/m, za0h.h[w12, 6]
-; CHECK-NEXT: b dummy_use_4_nxv8i16
+; CHECK-NEXT: b use
   %z0 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.2 = add i32 %tileslice, 2
   %z1 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.2)
@@ -105,7 +105,7 @@ define void @extract_row_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
   %tileslice.6 = add i32 %tileslice, 6
   %z3 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.6)

-  tail call void @dummy_use_4_nxv8i16(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
+  tail call void @use(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
   ret void
 }

@@ -120,7 +120,7 @@ define void @extract_col_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.h, p0/m, za1v.h[w12, 3]
 ; CHECK-NEXT: mov z2.h, p0/m, za1v.h[w12, 5]
 ; CHECK-NEXT: mov z3.h, p0/m, za1v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_4_nxv8i16
+; CHECK-NEXT: b use
   %tileslice.1 = add i32 %tileslice, 1
   %z0 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.vert.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 1, i32 %tileslice.1)
   %tileslice.3 = add i32 %tileslice, 3
@@ -130,7 +130,7 @@ define void @extract_col_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
   %tileslice.7 = add i32 %tileslice, 7
   %z3 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.vert.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 1, i32 %tileslice.7)

-  tail call void @dummy_use_4_nxv8i16(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
+  tail call void @use(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
   ret void
 }

@@ -153,7 +153,7 @@ define void @extract_f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 %ti
 ; CHECK-NEXT: mov z5.h, p0/m, za0h.h[w12, 5]
 ; CHECK-NEXT: mov z6.h, p0/m, za0v.h[w12, 6]
 ; CHECK-NEXT: mov z7.h, p0/m, za0v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_8_nxv8f16
+; CHECK-NEXT: b use
   %z0 = call <vscale x 8 x half> @llvm.aarch64.sme.read.horiz.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.1 = add i32 %tileslice, 1
   %z1 = call <vscale x 8 x half> @llvm.aarch64.sme.read.horiz.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -170,8 +170,8 @@ define void @extract_f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 %ti
   %tileslice.7 = add i32 %tileslice, 7
   %z7 = call <vscale x 8 x half> @llvm.aarch64.sme.read.vert.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.7)

-  tail call void @dummy_use_8_nxv8f16(<vscale x 8 x half> %z0, <vscale x 8 x half> %z1, <vscale x 8 x half> %z2, <vscale x 8 x half> %z3,
-                                      <vscale x 8 x half> %z4, <vscale x 8 x half> %z5, <vscale x 8 x half> %z6, <vscale x 8 x half> %z7)
+  tail call void @use(<vscale x 8 x half> %z0, <vscale x 8 x half> %z1, <vscale x 8 x half> %z2, <vscale x 8 x half> %z3,
+                      <vscale x 8 x half> %z4, <vscale x 8 x half> %z5, <vscale x 8 x half> %z6, <vscale x 8 x half> %z7)
   ret void
 }

@@ -194,7 +194,7 @@ define void @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32
 ; CHECK-NEXT: mov z5.h, p0/m, za0h.h[w12, 5]
 ; CHECK-NEXT: mov z6.h, p0/m, za0v.h[w12, 6]
 ; CHECK-NEXT: mov z7.h, p0/m, za0v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_8_nxv8bf16
+; CHECK-NEXT: b use
   %z0 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.horiz.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.1 = add i32 %tileslice, 1
   %z1 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.horiz.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -211,8 +211,8 @@ define void @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32
   %tileslice.7 = add i32 %tileslice, 7
   %z7 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.vert.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.7)

-  tail call void @dummy_use_8_nxv8bf16(<vscale x 8 x bfloat> %z0, <vscale x 8 x bfloat> %z1, <vscale x 8 x bfloat> %z2, <vscale x 8 x bfloat> %z3,
-                                       <vscale x 8 x bfloat> %z4, <vscale x 8 x bfloat> %z5, <vscale x 8 x bfloat> %z6, <vscale x 8 x bfloat> %z7)
+  tail call void @use(<vscale x 8 x bfloat> %z0, <vscale x 8 x bfloat> %z1, <vscale x 8 x bfloat> %z2, <vscale x 8 x bfloat> %z3,
+                      <vscale x 8 x bfloat> %z4, <vscale x 8 x bfloat> %z5, <vscale x 8 x bfloat> %z6, <vscale x 8 x bfloat> %z7)
   ret void
 }

@@ -223,12 +223,12 @@ define void @extract_row_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.s, p0/m, za0h.s[w12, 0]
 ; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 2]
-; CHECK-NEXT: b dummy_use_2_nxv4i32
+; CHECK-NEXT: b use
   %z0 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.2 = add i32 %tileslice, 2
   %z1 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.2)

-  tail call void @dummy_use_2_nxv4i32(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
+  tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
   ret void
 }

@@ -239,13 +239,13 @@ define void @extract_col_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.s, p0/m, za3v.s[w12, 1]
 ; CHECK-NEXT: mov z1.s, p0/m, za3v.s[w12, 3]
-; CHECK-NEXT: b dummy_use_2_nxv4i32
+; CHECK-NEXT: b use
   %tileslice.1 = add i32 %tileslice, 1
   %z0 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.vert.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 3, i32 %tileslice.1)
   %tileslice.3 = add i32 %tileslice, 3
   %z1 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.vert.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 3, i32 %tileslice.3)

-  tail call void @dummy_use_2_nxv4i32(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
+  tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
   ret void
 }

@@ -260,7 +260,7 @@ define void @extract_f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 1]
 ; CHECK-NEXT: mov z2.s, p0/m, za0v.s[w12, 2]
 ; CHECK-NEXT: mov z3.s, p0/m, za0v.s[w12, 3]
-; CHECK-NEXT: b dummy_use_4_nxv4f32
+; CHECK-NEXT: b use
   %z0 = call <vscale x 4 x float> @llvm.aarch64.sme.read.horiz.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.1 = add i32 %tileslice, 1
   %z1 = call <vscale x 4 x float> @llvm.aarch64.sme.read.horiz.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -269,7 +269,7 @@ define void @extract_f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 %t
   %tileslice.3 = add i32 %tileslice, 3
   %z3 = call <vscale x 4 x float> @llvm.aarch64.sme.read.vert.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.3)

-  tail call void @dummy_use_4_nxv4f32(<vscale x 4 x float> %z0, <vscale x 4 x float> %z1, <vscale x 4 x float> %z2, <vscale x 4 x float> %z3)
+  tail call void @use(<vscale x 4 x float> %z0, <vscale x 4 x float> %z1, <vscale x 4 x float> %z2, <vscale x 4 x float> %z3)
   ret void
 }

@@ -301,12 +301,12 @@ define void @extract_f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 %
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.d, p0/m, za0h.d[w12, 0]
 ; CHECK-NEXT: mov z1.d, p0/m, za0v.d[w12, 1]
-; CHECK-NEXT: b dummy_use_2_nxv2f64
+; CHECK-NEXT: b use
   %z0 = call <vscale x 2 x double> @llvm.aarch64.sme.read.horiz.nxv2f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 0, i32 %tileslice)
   %tileslice.1 = add i32 %tileslice, 1
   %z1 = call <vscale x 2 x double> @llvm.aarch64.sme.read.vert.nxv2f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 0, i32 %tileslice.1)

-  tail call void @dummy_use_2_nxv2f64(<vscale x 2 x double> %z0, <vscale x 2 x double> %z1)
+  tail call void @use(<vscale x 2 x double> %z0, <vscale x 2 x double> %z1)
   ret void
 }

@@ -453,21 +453,33 @@ define <vscale x 2 x double> @extract_col_q_v2f64(<vscale x 2 x double> %zd, <vs
 define <vscale x 4 x i32> @test_sink_offset_operand(<vscale x 4 x i1> %pg, i32 %base, i32 %N) {
 ; CHECK-LABEL: test_sink_offset_operand:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov z0.s, #0 // =0x0
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: mov z3.s, #0 // =0x0
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: .LBB26_1: // %for.body
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov z1.d, z0.d
-; CHECK-NEXT: mov z2.d, z0.d
+; CHECK-NEXT: mov z0.d, z3.d
+; CHECK-NEXT: mov z1.d, z3.d
 ; CHECK-NEXT: subs w1, w1, #3
-; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 0]
-; CHECK-NEXT: mov z2.s, p0/m, za0h.s[w12, 1]
-; CHECK-NEXT: mov z3.s, p0/m, za0h.s[w12, 2]
+; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.s, p0/m, za0h.s[w12, 0]
+; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 1]
+; CHECK-NEXT: mov z2.s, p0/m, za0h.s[w12, 2]
 ; CHECK-NEXT: b.ne .LBB26_1
 ; CHECK-NEXT: // %bb.2: // %exit
-; CHECK-NEXT: add z0.s, z1.s, z2.s
-; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: add z3.s, z0.s, z1.s
+; CHECK-NEXT: add z8.s, z3.s, z2.s
+; CHECK-NEXT: bl use
+; CHECK-NEXT: mov z0.d, z8.d
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
   %add1 = add i32 %base, 1
@@ -486,6 +498,7 @@ for.body:
 exit:
   %tmp1 = add <vscale x 4 x i32> %z0, %z1
   %res = add <vscale x 4 x i32> %tmp1, %z2
+  tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1, <vscale x 4 x i32> %z2)
   ret <vscale x 4 x i32> %res
 }

@@ -524,33 +537,7 @@ declare <vscale x 2 x i64> @llvm.aarch64.sme.readq.vert.nxv2i64(<vscale x 2 x i6
 declare <vscale x 2 x double> @llvm.aarch64.sme.readq.vert.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32, i32)

 ; ------------------------------------------------------------------------------
-; Dummy external functions to force code retention.
-; The compiler does not see their implementations, so it must keep the calls.
+; Dummy external function to force code retention.
 ; ------------------------------------------------------------------------------

-declare void @dummy_use_8_nxv16i8(
-  <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>,
-  <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>
-)
-
-declare void @dummy_use_4_nxv8i16(
-  <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>
-)
-
-declare void @dummy_use_8_nxv8f16(
-  <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
-  <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>
-)
-
-declare void @dummy_use_8_nxv8bf16(
-  <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
-  <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>
-)
-
-declare void @dummy_use_2_nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-
-declare void @dummy_use_4_nxv4f32(
-  <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>
-)
-
-declare void @dummy_use_2_nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare void @use(...)