@@ -413,6 +413,40 @@ def RISCVVIntrinsicsTable : GenericTable {
   let PrimaryKeyName = "getRISCVVIntrinsicInfo";
 }
 
+class RISCVVLX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVLXTable : GenericTable {
+  let FilterClass = "RISCVVLX";
+  let CppTypeName = "VLX_VSXPseudo";
+  let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVLXPseudo";
+}
+
+class RISCVVSX<bit M, bit O, bits<7> S, bits<3> L, bits<3> IL> {
+  bits<1> Masked = M;
+  bits<1> Ordered = O;
+  bits<7> SEW = S;
+  bits<3> LMUL = L;
+  bits<3> IndexLMUL = IL;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVVSXTable : GenericTable {
+  let FilterClass = "RISCVVSX";
+  let CppTypeName = "VLX_VSXPseudo";
+  let Fields = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Ordered", "SEW", "LMUL", "IndexLMUL"];
+  let PrimaryKeyName = "getVSXPseudo";
+}
+
 class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<7> S, bits<3> L> {
   bits<4> NF = N;
   bits<1> Masked = M;
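
[Editor's sketch, not part of the patch] The two GenericTable defs above make the SearchableTables backend emit a VLX_VSXPseudo record type plus getVLXPseudo()/getVSXPseudo() lookups keyed on (Masked, Ordered, SEW, LMUL, IndexLMUL), which is how lowering code can map an indexed load/store configuration back to a pseudo opcode. The C++ below only mimics that shape; the field widths, sample rows, linear search, and main() driver are illustrative assumptions, not the generated code.

// Stand-in for the tblgen output (illustrative assumptions only).
#include <cstdint>

struct VLX_VSXPseudo {   // CppTypeName from the GenericTable definitions
  uint8_t Masked;
  uint8_t Ordered;
  uint8_t SEW;           // element width of the data operand
  uint8_t LMUL;          // register-group multiplier of the data operand
  uint8_t IndexLMUL;     // register-group multiplier of the index operand
  uint16_t Pseudo;       // opcode of the matching pseudo instruction
};

// Hypothetical rows; the real table holds one row per RISCVVLX record.
static const VLX_VSXPseudo VLXTable[] = {
    {/*Masked*/ 0, /*Ordered*/ 1, /*SEW*/ 32, /*LMUL*/ 0, /*IndexLMUL*/ 0, 1234},
    {/*Masked*/ 1, /*Ordered*/ 1, /*SEW*/ 32, /*LMUL*/ 0, /*IndexLMUL*/ 0, 1235},
};

// Stand-in for the generated getVLXPseudo(): return the unique row matching
// the primary key, or nullptr if that configuration has no indexed-load pseudo.
const VLX_VSXPseudo *getVLXPseudo(uint8_t Masked, uint8_t Ordered, uint8_t SEW,
                                  uint8_t LMUL, uint8_t IndexLMUL) {
  for (const VLX_VSXPseudo &P : VLXTable)
    if (P.Masked == Masked && P.Ordered == Ordered && P.SEW == SEW &&
        P.LMUL == LMUL && P.IndexLMUL == IndexLMUL)
      return &P;
  return nullptr;
}

int main() {
  // Look up the unmasked, ordered, SEW=32 sample row.
  const VLX_VSXPseudo *P = getVLXPseudo(0, 1, 32, 0, 0);
  return (P && P->Pseudo == 1234) ? 0 : 1;
}
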
@@ -616,10 +650,12 @@ class VPseudoSLoadMask<VReg RetClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                         bit Ordered>:
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -631,12 +667,14 @@ class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                       bit Ordered>:
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge,
                   GPR:$rs1, IdxClass:$rs2,
                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVLX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -877,10 +915,12 @@ class VPseudoBinaryNoMask<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                          bit Ordered>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSX</*Masked*/0, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
@@ -892,10 +932,12 @@ class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL,
+                        bit Ordered>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
-      RISCVVPseudo {
+      RISCVVPseudo,
+      RISCVVSX</*Masked*/1, Ordered, EEW, VLMul, LMUL> {
   let mayLoad = 0;
   let mayStore = 1;
   let hasSideEffects = 0;
@@ -1284,7 +1326,7 @@ multiclass VPseudoSLoad {
   }
 }
 
-multiclass VPseudoILoad {
+multiclass VPseudoILoad<bit Ordered> {
   foreach eew = EEWList in {
     foreach sew = EEWList in {
       foreach lmul = MxSet<sew>.m in {
@@ -1298,8 +1340,10 @@ multiclass VPseudoILoad {
           defvar Vreg = lmul.vrclass;
           defvar IdxVreg = idx_lmul.vrclass;
           let VLMul = lmul.value in {
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask<Vreg, IdxVreg>;
-            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask<Vreg, IdxVreg>;
+            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
+              VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
+            def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
+              VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
           }
         }
       }
@@ -1341,7 +1385,7 @@ multiclass VPseudoSStore {
   }
 }
 
-multiclass VPseudoIStore {
+multiclass VPseudoIStore<bit Ordered> {
  foreach eew = EEWList in {
    foreach sew = EEWList in {
      foreach lmul = MxSet<sew>.m in {
@@ -1356,9 +1400,9 @@ multiclass VPseudoIStore {
           defvar IdxVreg = idx_lmul.vrclass;
           let VLMul = lmul.value in {
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
-              VPseudoIStoreNoMask<Vreg, IdxVreg>;
+              VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
             def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
-              VPseudoIStoreMask<Vreg, IdxVreg>;
+              VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
           }
         }
       }
@@ -3263,10 +3307,10 @@ defm PseudoVSS : VPseudoSStore;
 //===----------------------------------------------------------------------===//
 
 // Vector Indexed Loads and Stores
-defm PseudoVLUX : VPseudoILoad;
-defm PseudoVLOX : VPseudoILoad;
-defm PseudoVSOX : VPseudoIStore;
-defm PseudoVSUX : VPseudoIStore;
+defm PseudoVLUX : VPseudoILoad</*Ordered=*/false>;
+defm PseudoVLOX : VPseudoILoad</*Ordered=*/true>;
+defm PseudoVSOX : VPseudoIStore</*Ordered=*/true>;
+defm PseudoVSUX : VPseudoIStore</*Ordered=*/false>;
 
 //===----------------------------------------------------------------------===//
 // 7.7. Unit-stride Fault-Only-First Loads
@@ -3844,45 +3888,6 @@ foreach vti = AllVectors in
                      vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
 }
 
-//===----------------------------------------------------------------------===//
-// 7.6 Vector Indexed Instructions
-//===----------------------------------------------------------------------===//
-
-foreach vti = AllVectors in
-  foreach eew = EEWList in {
-    defvar vlmul = vti.LMul;
-    defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
-    defvar log_sew = shift_amount<vti.SEW>.val;
-    // The data vector register group has EEW=SEW, EMUL=LMUL, while the offset
-    // vector register group has EEW encoding in the instruction and EMUL=(EEW/SEW)*LMUL.
-    // calculate octuple elmul which is (eew * octuple_lmul) >> log_sew
-    defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
-    // legal octuple elmul should be more than 0 and less than equal 64
-    if !gt(octuple_elmul, 0) then {
-      if !le(octuple_elmul, 64) then {
-        defvar elmul_str = octuple_to_str<octuple_elmul>.ret;
-        defvar elmul =!cast<LMULInfo>("V_" # elmul_str);
-        defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
-
-        defm : VPatILoad<"int_riscv_vluxei",
-                         "PseudoVLUXEI"#eew,
-                         vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                         vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-        defm : VPatILoad<"int_riscv_vloxei",
-                         "PseudoVLOXEI"#eew,
-                         vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                         vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-        defm : VPatIStore<"int_riscv_vsoxei",
-                          "PseudoVSOXEI"#eew,
-                          vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                          vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-        defm : VPatIStore<"int_riscv_vsuxei",
-                          "PseudoVSUXEI"#eew,
-                          vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
-                          vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
-      }
-    }
-  }
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
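
[Editor's sketch, not part of the patch] The comment in the removed pattern loop above describes the relationship the new table keys also encode: the data operand has EEW=SEW and EMUL=LMUL, while the index operand takes its EEW from the instruction and EMUL=(EEW/SEW)*LMUL, computed in eighths ("octuple") so fractional register groups stay in integer arithmetic, with only 1/8 <= EMUL <= 8 (octuple 1..64) being legal. A standalone illustration of that arithmetic (the function name here is invented):

// Illustrative only: octuple (x8) index-EMUL computation, mirroring
// !srl(!mul(eew, octuple_lmul), log_sew) from the TableGen above.
#include <cassert>
#include <cstdio>

// Returns 8*EMUL for the index operand, given the index EEW, the data SEW,
// and 8*LMUL of the data operand; EMUL = (EEW / SEW) * LMUL.
unsigned octupleIndexEMUL(unsigned EEW, unsigned SEW, unsigned OctupleLMUL) {
  unsigned LogSEW = 0;
  while ((1u << LogSEW) < SEW)   // log2(SEW); SEW is a power of two
    ++LogSEW;
  return (EEW * OctupleLMUL) >> LogSEW;
}

int main() {
  // Example: SEW=32, LMUL=1 (octuple 8), index EEW=16 -> EMUL=1/2 (octuple 4).
  unsigned O = octupleIndexEMUL(/*EEW=*/16, /*SEW=*/32, /*OctupleLMUL=*/8);
  assert(O == 4);
  // Only 1/8 <= EMUL <= 8 (octuple 1..64) is a legal register-group size.
  bool Legal = O > 0 && O <= 64;
  std::printf("octuple EMUL = %u, legal = %d\n", O, Legal);
  return 0;
}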