Commit 858b465

[RISCV] Add tests for VP strided stores with unit stride. NFC

1 parent: 469f6b9 · commit 858b465

File tree

2 files changed: +168 −14 lines changed

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll

Lines changed: 81 additions & 4 deletions
@@ -84,6 +84,17 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride,
   ret void
 }
 
+define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8> %val, ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -120,6 +131,17 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -144,6 +166,17 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -168,6 +201,17 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -228,6 +272,17 @@ define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %strid
   ret void
 }
 
+define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -252,6 +307,17 @@ define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stri
   ret void
 }
 
+define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -276,6 +342,17 @@ define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %str
   ret void
 }
 
+define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -343,10 +420,10 @@ define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %strid
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB27_2
+; CHECK-NEXT:    bltu a2, a4, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB34_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    mul a3, a3, a1
@@ -369,10 +446,10 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB28_2
+; CHECK-NEXT:    bltu a2, a4, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB28_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
 ; CHECK-NEXT:    mul a3, a3, a1

llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll

Lines changed: 87 additions & 10 deletions
@@ -96,6 +96,17 @@ define void @strided_vpstore_nxv8i8(<vscale x 8 x i8> %val, ptr %ptr, i32 signex
   ret void
 }
 
+define void @strided_vpstore_nxv8i8_unit_stride(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv8i8.p0.i32(<vscale x 8 x i8> %val, ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv1i16.p0.i32(<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32)
 
 define void @strided_vpstore_nxv1i16(<vscale x 1 x i16> %val, ptr %ptr, i32 signext %strided, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -132,6 +143,17 @@ define void @strided_vpstore_nxv4i16(<vscale x 4 x i16> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv4i16_unit_stride(<vscale x 4 x i16> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i16.p0.i32(<vscale x 4 x i16> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8i16.p0.i32(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8i16(<vscale x 8 x i16> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -180,6 +202,17 @@ define void @strided_vpstore_nxv4i32(<vscale x 4 x i32> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv4i32_unit_stride(<vscale x 4 x i32> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4i32.p0.i32(<vscale x 4 x i32> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8i32.p0.i32(<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8i32(<vscale x 8 x i32> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -204,6 +237,17 @@ define void @strided_vpstore_nxv1i64(<vscale x 1 x i64> %val, ptr %ptr, i32 sign
   ret void
 }
 
+define void @strided_vpstore_nxv1i64_unit_stride(<vscale x 1 x i64> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv1i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i32(<vscale x 1 x i64> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv2i64.p0.i32(<vscale x 2 x i64>, ptr, i32, <vscale x 2 x i1>, i32)
 
 define void @strided_vpstore_nxv2i64(<vscale x 2 x i64> %val, ptr %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -276,6 +320,17 @@ define void @strided_vpstore_nxv4f16(<vscale x 4 x half> %val, ptr %ptr, i32 sig
   ret void
 }
 
+define void @strided_vpstore_nxv4f16_unit_stride(<vscale x 4 x half> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f16.p0.i32(<vscale x 4 x half> %val, ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8f16.p0.i32(<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8f16(<vscale x 8 x half> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -324,6 +379,17 @@ define void @strided_vpstore_nxv4f32(<vscale x 4 x float> %val, ptr %ptr, i32 si
   ret void
 }
 
+define void @strided_vpstore_nxv4f32_unit_stride(<vscale x 4 x float> %val, ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv4f32.p0.i32(<vscale x 4 x float> %val, ptr %ptr, i32 4, <vscale x 4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv8f32.p0.i32(<vscale x 8 x float>, ptr, i32, <vscale x 8 x i1>, i32)
 
 define void @strided_vpstore_nxv8f32(<vscale x 8 x float> %val, ptr %ptr, i32 signext %strided, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -348,6 +414,17 @@ define void @strided_vpstore_nxv1f64(<vscale x 1 x double> %val, ptr %ptr, i32 s
   ret void
 }
 
+define void @strided_vpstore_nxv1f64_unit_stride(<vscale x 1 x double> %val, ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_nxv1f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.nxv1f64.p0.i32(<vscale x 1 x double> %val, ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.nxv2f64.p0.i32(<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>, i32)
 
 define void @strided_vpstore_nxv2f64(<vscale x 2 x double> %val, ptr %ptr, i32 signext %strided, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -427,10 +504,10 @@ define void @strided_store_nxv16f64(<vscale x 16 x double> %v, ptr %ptr, i32 sig
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    bltu a2, a3, .LBB34_2
+; CHECK-NEXT:    bltu a2, a3, .LBB41_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a3
-; CHECK-NEXT:  .LBB34_2:
+; CHECK-NEXT:  .LBB41_2:
 ; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    sub a5, a2, a3
@@ -454,10 +531,10 @@ define void @strided_store_nxv16f64_allones_mask(<vscale x 16 x double> %v, ptr
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    bltu a2, a3, .LBB35_2
+; CHECK-NEXT:    bltu a2, a3, .LBB42_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a3
-; CHECK-NEXT:  .LBB35_2:
+; CHECK-NEXT:  .LBB42_2:
 ; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
 ; CHECK-NEXT:    sub a3, a2, a3
@@ -485,15 +562,15 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
 ; CHECK-NEXT:    slli a6, a4, 1
 ; CHECK-NEXT:    vmv1r.v v24, v0
 ; CHECK-NEXT:    mv a5, a3
-; CHECK-NEXT:    bltu a3, a6, .LBB36_2
+; CHECK-NEXT:    bltu a3, a6, .LBB43_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a5, a6
-; CHECK-NEXT:  .LBB36_2:
+; CHECK-NEXT:  .LBB43_2:
 ; CHECK-NEXT:    mv a7, a5
-; CHECK-NEXT:    bltu a5, a4, .LBB36_4
+; CHECK-NEXT:    bltu a5, a4, .LBB43_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a7, a4
-; CHECK-NEXT:  .LBB36_4:
+; CHECK-NEXT:  .LBB43_4:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr t0, vlenb
@@ -521,10 +598,10 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a0, a3, a0
 ; CHECK-NEXT:    vsse64.v v16, (a7), a2, v0.t
-; CHECK-NEXT:    bltu a0, a4, .LBB36_6
+; CHECK-NEXT:    bltu a0, a4, .LBB43_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a0, a4
-; CHECK-NEXT:  .LBB36_6:
+; CHECK-NEXT:  .LBB43_6:
 ; CHECK-NEXT:    mul a3, a5, a2
 ; CHECK-NEXT:    add a1, a1, a3
 ; CHECK-NEXT:    srli a4, a4, 2
Comments (0)