@@ -247,34 +247,34 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV32-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    csrr s1, vlenb
 ; RV32-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 10
-; RV32-NEXT:    vslidedown.vi v11, v8, 8
+; RV32-NEXT:    vslidedown.vi v11, v8, 10
+; RV32-NEXT:    vslidedown.vi v10, v8, 8
 ; RV32-NEXT:    vslidedown.vi v9, v8, 2
 ; RV32-NEXT:    srli s0, s1, 3
 ; RV32-NEXT:    add a0, s0, s0
 ; RV32-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; RV32-NEXT:    vslideup.vx v11, v10, s0
-; RV32-NEXT:    vmv1r.v v10, v8
-; RV32-NEXT:    vslideup.vx v10, v9, s0
+; RV32-NEXT:    vslideup.vx v10, v11, s0
+; RV32-NEXT:    vmv1r.v v11, v8
+; RV32-NEXT:    vslideup.vx v11, v9, s0
 ; RV32-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 12
 ; RV32-NEXT:    srli a0, s1, 2
 ; RV32-NEXT:    add a1, a0, s0
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; RV32-NEXT:    vslideup.vx v11, v9, a0
+; RV32-NEXT:    vslideup.vx v10, v9, a0
 ; RV32-NEXT:    csrr a2, vlenb
 ; RV32-NEXT:    slli a2, a2, 1
 ; RV32-NEXT:    add a2, sp, a2
 ; RV32-NEXT:    addi a2, a2, 32
-; RV32-NEXT:    vs1r.v v11, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vs1r.v v10, (a2) # Unknown-size Folded Spill
 ; RV32-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
 ; RV32-NEXT:    vslidedown.vi v9, v8, 4
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; RV32-NEXT:    vslideup.vx v10, v9, a0
+; RV32-NEXT:    vslideup.vx v11, v9, a0
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 32
-; RV32-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vs1r.v v11, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    li a1, 3
 ; RV32-NEXT:    mv a0, s0
 ; RV32-NEXT:    call __mulsi3
@@ -338,34 +338,34 @@ define {<2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>, <2 x i8>} @v
 ; RV64-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT:    csrr s1, vlenb
 ; RV64-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 10
-; RV64-NEXT:    vslidedown.vi v11, v8, 8
+; RV64-NEXT:    vslidedown.vi v11, v8, 10
+; RV64-NEXT:    vslidedown.vi v10, v8, 8
 ; RV64-NEXT:    vslidedown.vi v9, v8, 2
 ; RV64-NEXT:    srli s0, s1, 3
 ; RV64-NEXT:    add a0, s0, s0
 ; RV64-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
-; RV64-NEXT:    vslideup.vx v11, v10, s0
-; RV64-NEXT:    vmv1r.v v10, v8
-; RV64-NEXT:    vslideup.vx v10, v9, s0
+; RV64-NEXT:    vslideup.vx v10, v11, s0
+; RV64-NEXT:    vmv1r.v v11, v8
+; RV64-NEXT:    vslideup.vx v11, v9, s0
 ; RV64-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 12
 ; RV64-NEXT:    srli a0, s1, 2
 ; RV64-NEXT:    add a1, a0, s0
 ; RV64-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; RV64-NEXT:    vslideup.vx v11, v9, a0
+; RV64-NEXT:    vslideup.vx v10, v9, a0
 ; RV64-NEXT:    csrr a2, vlenb
 ; RV64-NEXT:    slli a2, a2, 1
 ; RV64-NEXT:    add a2, sp, a2
 ; RV64-NEXT:    addi a2, a2, 32
-; RV64-NEXT:    vs1r.v v11, (a2) # Unknown-size Folded Spill
+; RV64-NEXT:    vs1r.v v10, (a2) # Unknown-size Folded Spill
 ; RV64-NEXT:    vsetivli zero, 2, e8, m1, ta, ma
 ; RV64-NEXT:    vslidedown.vi v9, v8, 4
 ; RV64-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
-; RV64-NEXT:    vslideup.vx v10, v9, a0
+; RV64-NEXT:    vslideup.vx v11, v9, a0
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 32
-; RV64-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT:    vs1r.v v11, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT:    li a1, 3
 ; RV64-NEXT:    mv a0, s0
 ; RV64-NEXT:    call __muldi3