
Commit 9077c3a

Simplify dummy functions
1 parent 1e85f2a commit 9077c3a


llvm/test/CodeGen/AArch64/sme-intrinsics-mova-extract.ll

Lines changed: 48 additions & 61 deletions
@@ -20,7 +20,7 @@ define void @extract_row_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 ; CHECK-NEXT: mov z5.b, p0/m, za0h.b[w12, 10]
 ; CHECK-NEXT: mov z6.b, p0/m, za0h.b[w12, 12]
 ; CHECK-NEXT: mov z7.b, p0/m, za0h.b[w12, 14]
-; CHECK-NEXT: b dummy_use_8_nxv16i8
+; CHECK-NEXT: b use
 %z0 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.2 = add i32 %tileslice, 2
 %z1 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.2)
@@ -38,8 +38,8 @@ define void @extract_row_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 %z7 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.horiz.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.14)
 
 ; Force retention of z0..z7
-tail call void @dummy_use_8_nxv16i8(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
-<vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
+tail call void @use(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
+<vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
 ret void
 }
 
@@ -62,7 +62,7 @@ define void @extract_col_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 ; CHECK-NEXT: mov z5.b, p0/m, za0v.b[w12, 11]
 ; CHECK-NEXT: mov z6.b, p0/m, za0v.b[w12, 13]
 ; CHECK-NEXT: mov z7.b, p0/m, za0v.b[w12, 15]
-; CHECK-NEXT: b dummy_use_8_nxv16i8
+; CHECK-NEXT: b use
 %tileslice.1 = add i32 %tileslice, 1
 %z0 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.vert.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.1)
 %tileslice.3 = add i32 %tileslice, 3
@@ -80,8 +80,8 @@ define void @extract_col_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 %
 %tileslice.15 = add i32 %tileslice, 15
 %z7 = call <vscale x 16 x i8> @llvm.aarch64.sme.read.vert.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i32 0, i32 %tileslice.15)
 
-tail call void @dummy_use_8_nxv16i8(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
-<vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
+tail call void @use(<vscale x 16 x i8> %z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3,
+<vscale x 16 x i8> %z4, <vscale x 16 x i8> %z5, <vscale x 16 x i8> %z6, <vscale x 16 x i8> %z7)
 ret void
 }
 
@@ -96,7 +96,7 @@ define void @extract_row_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.h, p0/m, za0h.h[w12, 2]
 ; CHECK-NEXT: mov z2.h, p0/m, za0h.h[w12, 4]
 ; CHECK-NEXT: mov z3.h, p0/m, za0h.h[w12, 6]
-; CHECK-NEXT: b dummy_use_4_nxv8i16
+; CHECK-NEXT: b use
 %z0 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.2 = add i32 %tileslice, 2
 %z1 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.2)
@@ -105,7 +105,7 @@ define void @extract_row_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 %tileslice.6 = add i32 %tileslice, 6
 %z3 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.horiz.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.6)
 
-tail call void @dummy_use_4_nxv8i16(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
+tail call void @use(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
 ret void
 }
 
@@ -120,7 +120,7 @@ define void @extract_col_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.h, p0/m, za1v.h[w12, 3]
 ; CHECK-NEXT: mov z2.h, p0/m, za1v.h[w12, 5]
 ; CHECK-NEXT: mov z3.h, p0/m, za1v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_4_nxv8i16
+; CHECK-NEXT: b use
 %tileslice.1 = add i32 %tileslice, 1
 %z0 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.vert.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 1, i32 %tileslice.1)
 %tileslice.3 = add i32 %tileslice, 3
@@ -130,7 +130,7 @@ define void @extract_col_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 %t
 %tileslice.7 = add i32 %tileslice, 7
 %z3 = call <vscale x 8 x i16> @llvm.aarch64.sme.read.vert.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i32 1, i32 %tileslice.7)
 
-tail call void @dummy_use_4_nxv8i16(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
+tail call void @use(<vscale x 8 x i16> %z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
 ret void
 }
 
@@ -153,7 +153,7 @@ define void @extract_f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 %ti
 ; CHECK-NEXT: mov z5.h, p0/m, za0h.h[w12, 5]
 ; CHECK-NEXT: mov z6.h, p0/m, za0v.h[w12, 6]
 ; CHECK-NEXT: mov z7.h, p0/m, za0v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_8_nxv8f16
+; CHECK-NEXT: b use
 %z0 = call <vscale x 8 x half> @llvm.aarch64.sme.read.horiz.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.1 = add i32 %tileslice, 1
 %z1 = call <vscale x 8 x half> @llvm.aarch64.sme.read.horiz.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -170,8 +170,8 @@ define void @extract_f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 %ti
 %tileslice.7 = add i32 %tileslice, 7
 %z7 = call <vscale x 8 x half> @llvm.aarch64.sme.read.vert.nxv8f16(<vscale x 8 x half> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.7)
 
-tail call void @dummy_use_8_nxv8f16(<vscale x 8 x half> %z0, <vscale x 8 x half> %z1, <vscale x 8 x half> %z2, <vscale x 8 x half> %z3,
-<vscale x 8 x half> %z4, <vscale x 8 x half> %z5, <vscale x 8 x half> %z6, <vscale x 8 x half> %z7)
+tail call void @use(<vscale x 8 x half> %z0, <vscale x 8 x half> %z1, <vscale x 8 x half> %z2, <vscale x 8 x half> %z3,
+<vscale x 8 x half> %z4, <vscale x 8 x half> %z5, <vscale x 8 x half> %z6, <vscale x 8 x half> %z7)
 ret void
 }
 
@@ -194,7 +194,7 @@ define void @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32
 ; CHECK-NEXT: mov z5.h, p0/m, za0h.h[w12, 5]
 ; CHECK-NEXT: mov z6.h, p0/m, za0v.h[w12, 6]
 ; CHECK-NEXT: mov z7.h, p0/m, za0v.h[w12, 7]
-; CHECK-NEXT: b dummy_use_8_nxv8bf16
+; CHECK-NEXT: b use
 %z0 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.horiz.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.1 = add i32 %tileslice, 1
 %z1 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.horiz.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -211,8 +211,8 @@ define void @extract_bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32
 %tileslice.7 = add i32 %tileslice, 7
 %z7 = call <vscale x 8 x bfloat> @llvm.aarch64.sme.read.vert.nxv8bf16(<vscale x 8 x bfloat> %zd, <vscale x 8 x i1> %pg, i32 0, i32 %tileslice.7)
 
-tail call void @dummy_use_8_nxv8bf16(<vscale x 8 x bfloat> %z0, <vscale x 8 x bfloat> %z1, <vscale x 8 x bfloat> %z2, <vscale x 8 x bfloat> %z3,
-<vscale x 8 x bfloat> %z4, <vscale x 8 x bfloat> %z5, <vscale x 8 x bfloat> %z6, <vscale x 8 x bfloat> %z7)
+tail call void @use(<vscale x 8 x bfloat> %z0, <vscale x 8 x bfloat> %z1, <vscale x 8 x bfloat> %z2, <vscale x 8 x bfloat> %z3,
+<vscale x 8 x bfloat> %z4, <vscale x 8 x bfloat> %z5, <vscale x 8 x bfloat> %z6, <vscale x 8 x bfloat> %z7)
 ret void
 }
 
@@ -223,12 +223,12 @@ define void @extract_row_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.s, p0/m, za0h.s[w12, 0]
 ; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 2]
-; CHECK-NEXT: b dummy_use_2_nxv4i32
+; CHECK-NEXT: b use
 %z0 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.2 = add i32 %tileslice, 2
 %z1 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.horiz.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.2)
 
-tail call void @dummy_use_2_nxv4i32(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
+tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
 ret void
 }
 
@@ -239,13 +239,13 @@ define void @extract_col_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.s, p0/m, za3v.s[w12, 1]
 ; CHECK-NEXT: mov z1.s, p0/m, za3v.s[w12, 3]
-; CHECK-NEXT: b dummy_use_2_nxv4i32
+; CHECK-NEXT: b use
 %tileslice.1 = add i32 %tileslice, 1
 %z0 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.vert.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 3, i32 %tileslice.1)
 %tileslice.3 = add i32 %tileslice, 3
 %z1 = call <vscale x 4 x i32> @llvm.aarch64.sme.read.vert.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 3, i32 %tileslice.3)
 
-tail call void @dummy_use_2_nxv4i32(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
+tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1)
 ret void
 }
 
@@ -260,7 +260,7 @@ define void @extract_f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 %t
 ; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 1]
 ; CHECK-NEXT: mov z2.s, p0/m, za0v.s[w12, 2]
 ; CHECK-NEXT: mov z3.s, p0/m, za0v.s[w12, 3]
-; CHECK-NEXT: b dummy_use_4_nxv4f32
+; CHECK-NEXT: b use
 %z0 = call <vscale x 4 x float> @llvm.aarch64.sme.read.horiz.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.1 = add i32 %tileslice, 1
 %z1 = call <vscale x 4 x float> @llvm.aarch64.sme.read.horiz.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.1)
@@ -269,7 +269,7 @@ define void @extract_f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 %t
 %tileslice.3 = add i32 %tileslice, 3
 %z3 = call <vscale x 4 x float> @llvm.aarch64.sme.read.vert.nxv4f32(<vscale x 4 x float> %zd, <vscale x 4 x i1> %pg, i32 0, i32 %tileslice.3)
 
-tail call void @dummy_use_4_nxv4f32(<vscale x 4 x float> %z0, <vscale x 4 x float> %z1, <vscale x 4 x float> %z2, <vscale x 4 x float> %z3)
+tail call void @use(<vscale x 4 x float> %z0, <vscale x 4 x float> %z1, <vscale x 4 x float> %z2, <vscale x 4 x float> %z3)
 ret void
 }
 
@@ -301,12 +301,12 @@ define void @extract_f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 %
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: mov z0.d, p0/m, za0h.d[w12, 0]
 ; CHECK-NEXT: mov z1.d, p0/m, za0v.d[w12, 1]
-; CHECK-NEXT: b dummy_use_2_nxv2f64
+; CHECK-NEXT: b use
 %z0 = call <vscale x 2 x double> @llvm.aarch64.sme.read.horiz.nxv2f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 0, i32 %tileslice)
 %tileslice.1 = add i32 %tileslice, 1
 %z1 = call <vscale x 2 x double> @llvm.aarch64.sme.read.vert.nxv2f64(<vscale x 2 x double> %zd, <vscale x 2 x i1> %pg, i32 0, i32 %tileslice.1)
 
-tail call void @dummy_use_2_nxv2f64(<vscale x 2 x double> %z0, <vscale x 2 x double> %z1)
+tail call void @use(<vscale x 2 x double> %z0, <vscale x 2 x double> %z1)
 ret void
 }
 
@@ -453,21 +453,33 @@ define <vscale x 2 x double> @extract_col_q_v2f64(<vscale x 2 x double> %zd, <vs
 define <vscale x 4 x i32> @test_sink_offset_operand(<vscale x 4 x i1> %pg, i32 %base, i32 %N) {
 ; CHECK-LABEL: test_sink_offset_operand:
 ; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov z0.s, #0 // =0x0
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: mov z3.s, #0 // =0x0
 ; CHECK-NEXT: mov w12, w0
 ; CHECK-NEXT: .LBB26_1: // %for.body
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov z1.d, z0.d
-; CHECK-NEXT: mov z2.d, z0.d
+; CHECK-NEXT: mov z0.d, z3.d
+; CHECK-NEXT: mov z1.d, z3.d
 ; CHECK-NEXT: subs w1, w1, #3
-; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 0]
-; CHECK-NEXT: mov z2.s, p0/m, za0h.s[w12, 1]
-; CHECK-NEXT: mov z3.s, p0/m, za0h.s[w12, 2]
+; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.s, p0/m, za0h.s[w12, 0]
+; CHECK-NEXT: mov z1.s, p0/m, za0h.s[w12, 1]
+; CHECK-NEXT: mov z2.s, p0/m, za0h.s[w12, 2]
 ; CHECK-NEXT: b.ne .LBB26_1
 ; CHECK-NEXT: // %bb.2: // %exit
-; CHECK-NEXT: add z0.s, z1.s, z2.s
-; CHECK-NEXT: add z0.s, z0.s, z3.s
+; CHECK-NEXT: add z3.s, z0.s, z1.s
+; CHECK-NEXT: add z8.s, z3.s, z2.s
+; CHECK-NEXT: bl use
+; CHECK-NEXT: mov z0.d, z8.d
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT: ret
 entry:
 %add1 = add i32 %base, 1
@@ -486,6 +498,7 @@ for.body:
 exit:
 %tmp1 = add <vscale x 4 x i32> %z0, %z1
 %res = add <vscale x 4 x i32> %tmp1, %z2
+tail call void @use(<vscale x 4 x i32> %z0, <vscale x 4 x i32> %z1, <vscale x 4 x i32> %z2)
 ret <vscale x 4 x i32> %res
 }
 
@@ -524,33 +537,7 @@ declare <vscale x 2 x i64> @llvm.aarch64.sme.readq.vert.nxv2i64(<vscale x 2 x i6
 declare <vscale x 2 x double> @llvm.aarch64.sme.readq.vert.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32, i32)
 
 ; ------------------------------------------------------------------------------
-; Dummy external functions to force code retention.
-; The compiler does not see their implementations, so it must keep the calls.
+; Dummy external function to force code retention.
 ; ------------------------------------------------------------------------------
 
-declare void @dummy_use_8_nxv16i8(
-<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>,
-<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>
-)
-
-declare void @dummy_use_4_nxv8i16(
-<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>
-)
-
-declare void @dummy_use_8_nxv8f16(
-<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
-<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>
-)
-
-declare void @dummy_use_8_nxv8bf16(
-<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
-<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>
-)
-
-declare void @dummy_use_2_nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-
-declare void @dummy_use_4_nxv4f32(
-<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>
-)
-
-declare void @dummy_use_2_nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare void @use(...)
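
For context, the retention idiom the comment above describes works because the callee has no visible body: the optimizer must assume the call reads its arguments, so the values fed to it cannot be dead-code eliminated. A minimal standalone sketch of that idiom in LLVM IR (illustrative only, not part of this commit; @keep_alive and its i32 arguments are hypothetical, and only @use mirrors the variadic declaration above):

; External callee with no body: an opaque use of whatever is passed to it.
declare void @use(...)

define void @keep_alive(i32 %a, i32 %b) {
  ; Without the call below, %sum would be trivially dead and deleted.
  %sum = add i32 %a, %b
  ; Calling the variadic external @use forces %sum (and the add) to be kept.
  call void (...) @use(i32 %sum)
  ret void
}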
