@@ -1081,10 +1081,10 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1104,17 +1104,17 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
@@ -1151,7 +1151,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV64-NEXT: vsll.vx v16, v16, a4, v0.t
; RV64-NEXT: vor.vv v16, v24, v16, v0.t
; RV64-NEXT: addi a5, sp, 16
- ; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload
+ ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
@@ -1166,7 +1166,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV64-NEXT: vor.vv v8, v8, v16, v0.t
; RV64-NEXT: vor.vv v8, v8, v24, v0.t
; RV64-NEXT: addi a0, sp, 16
- ; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v8, v16, v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -1211,7 +1211,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
@@ -1226,7 +1226,7 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
@@ -1321,10 +1321,10 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1344,17 +1344,17 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
@@ -1391,7 +1391,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV64-NEXT: vsll.vx v16, v16, a4, v0.t
; RV64-NEXT: vor.vv v16, v24, v16, v0.t
; RV64-NEXT: addi a5, sp, 16
- ; RV64-NEXT: vl8re8.v v24, (a5) # Unknown-size Folded Reload
+ ; RV64-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v16, v16, v24, v0.t
; RV64-NEXT: addi a5, sp, 16
; RV64-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
@@ -1406,7 +1406,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV64-NEXT: vor.vv v8, v8, v16, v0.t
; RV64-NEXT: vor.vv v8, v8, v24, v0.t
; RV64-NEXT: addi a0, sp, 16
- ; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vor.vv v8, v16, v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -1451,7 +1451,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v0, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v24, v0, v24
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
@@ -1466,7 +1466,7 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: addi a0, sp, 16
- ; RV32-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v16, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
@@ -1555,12 +1555,12 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
- ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
+ ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: addi a0, sp, 16
- ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+ ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0