@@ -784,47 +784,47 @@ class VCMP<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
              IIC_VecFPCompare,
              [(set Ty:$vD, (Ty (PPCvcmp Ty:$vA, Ty:$vB, xo)))]>;
- class VCMPo <bits<10> xo, string asmstr, ValueType Ty>
+ class VCMP_rec <bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
              IIC_VecFPCompare,
-               [(set Ty:$vD, (Ty (PPCvcmp_o Ty:$vA, Ty:$vB, xo)))]> {
+               [(set Ty:$vD, (Ty (PPCvcmp_rec Ty:$vA, Ty:$vB, xo)))]> {
  let Defs = [CR6];
  let RC = 1;
}

// f32 element comparisons.
def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
- def VCMPBFP_rec : VCMPo <966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
+ def VCMPBFP_rec : VCMP_rec <966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
- def VCMPEQFP_rec : VCMPo <198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
+ def VCMPEQFP_rec : VCMP_rec <198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
- def VCMPGEFP_rec : VCMPo <454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
+ def VCMPGEFP_rec : VCMP_rec <454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
- def VCMPGTFP_rec : VCMPo <710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;
+ def VCMPGTFP_rec : VCMP_rec <710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;

// i8 element comparisons.
def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>;
- def VCMPEQUB_rec : VCMPo < 6, "vcmpequb. $vD, $vA, $vB", v16i8>;
+ def VCMPEQUB_rec : VCMP_rec < 6, "vcmpequb. $vD, $vA, $vB", v16i8>;
def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
- def VCMPGTSB_rec : VCMPo <774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
+ def VCMPGTSB_rec : VCMP_rec <774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
- def VCMPGTUB_rec : VCMPo <518, "vcmpgtub. $vD, $vA, $vB", v16i8>;
+ def VCMPGTUB_rec : VCMP_rec <518, "vcmpgtub. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
- def VCMPEQUH_rec : VCMPo < 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
+ def VCMPEQUH_rec : VCMP_rec < 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
- def VCMPGTSH_rec : VCMPo <838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
+ def VCMPGTSH_rec : VCMP_rec <838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
def VCMPGTUH : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
- def VCMPGTUH_rec : VCMPo <582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;
+ def VCMPGTUH_rec : VCMP_rec <582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPEQUW : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
- def VCMPEQUW_rec : VCMPo <134, "vcmpequw. $vD, $vA, $vB", v4i32>;
+ def VCMPEQUW_rec : VCMP_rec <134, "vcmpequw. $vD, $vA, $vB", v4i32>;
def VCMPGTSW : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
- def VCMPGTSW_rec : VCMPo <902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
+ def VCMPGTSW_rec : VCMP_rec <902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
- def VCMPGTUW_rec : VCMPo <646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
+ def VCMPGTUW_rec : VCMP_rec <646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;

let isCodeGenOnly = 1, isMoveImm = 1, isAsCheapAsAMove = 1,
    isReMaterializable = 1 in {
@@ -1291,11 +1291,11 @@ def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),

// i64 element comparisons.
def VCMPEQUD : VCMP <199, "vcmpequd $vD, $vA, $vB" , v2i64>;
- def VCMPEQUD_rec : VCMPo <199, "vcmpequd. $vD, $vA, $vB", v2i64>;
+ def VCMPEQUD_rec : VCMP_rec <199, "vcmpequd. $vD, $vA, $vB", v2i64>;
def VCMPGTSD : VCMP <967, "vcmpgtsd $vD, $vA, $vB" , v2i64>;
- def VCMPGTSD_rec : VCMPo <967, "vcmpgtsd. $vD, $vA, $vB", v2i64>;
+ def VCMPGTSD_rec : VCMP_rec <967, "vcmpgtsd. $vD, $vA, $vB", v2i64>;
def VCMPGTUD : VCMP <711, "vcmpgtud $vD, $vA, $vB" , v2i64>;
- def VCMPGTUD_rec : VCMPo <711, "vcmpgtud. $vD, $vA, $vB", v2i64>;
+ def VCMPGTUD_rec : VCMP_rec <711, "vcmpgtud. $vD, $vA, $vB", v2i64>;

// The cryptography instructions that do not require Category:Vector.Crypto
def VPMSUMB : VX1_Int_Ty<1032, "vpmsumb",
@@ -1363,21 +1363,21 @@ def VMSUMUDM : VA1a_Int_Ty3<35, "vmsumudm", int_ppc_altivec_vmsumudm,

// i8 element comparisons.
def VCMPNEB : VCMP < 7, "vcmpneb $vD, $vA, $vB" , v16i8>;
- def VCMPNEB_rec : VCMPo < 7, "vcmpneb. $vD, $vA, $vB" , v16i8>;
+ def VCMPNEB_rec : VCMP_rec < 7, "vcmpneb. $vD, $vA, $vB" , v16i8>;
def VCMPNEZB : VCMP <263, "vcmpnezb $vD, $vA, $vB" , v16i8>;
- def VCMPNEZB_rec : VCMPo <263, "vcmpnezb. $vD, $vA, $vB", v16i8>;
+ def VCMPNEZB_rec : VCMP_rec <263, "vcmpnezb. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPNEH : VCMP < 71, "vcmpneh $vD, $vA, $vB" , v8i16>;
- def VCMPNEH_rec : VCMPo < 71, "vcmpneh. $vD, $vA, $vB" , v8i16>;
+ def VCMPNEH_rec : VCMP_rec < 71, "vcmpneh. $vD, $vA, $vB" , v8i16>;
def VCMPNEZH : VCMP <327, "vcmpnezh $vD, $vA, $vB" , v8i16>;
- def VCMPNEZH_rec : VCMPo <327, "vcmpnezh. $vD, $vA, $vB", v8i16>;
+ def VCMPNEZH_rec : VCMP_rec <327, "vcmpnezh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPNEW : VCMP <135, "vcmpnew $vD, $vA, $vB" , v4i32>;
- def VCMPNEW_rec : VCMPo <135, "vcmpnew. $vD, $vA, $vB" , v4i32>;
+ def VCMPNEW_rec : VCMP_rec <135, "vcmpnew. $vD, $vA, $vB" , v4i32>;
def VCMPNEZW : VCMP <391, "vcmpnezw $vD, $vA, $vB" , v4i32>;
- def VCMPNEZW_rec : VCMPo <391, "vcmpnezw. $vD, $vA, $vB", v4i32>;
+ def VCMPNEZW_rec : VCMP_rec <391, "vcmpnezw. $vD, $vA, $vB", v4i32>;

// VX-Form: [PO VRT / UIM VRB XO].
// We use VXForm_1 to implement it, that is, we use "VRA" (5 bit) to represent
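
For readers skimming the diff: every vector compare above comes in a plain form and a dot-suffixed "record" form, and the rename (VCMPo to VCMP_rec, PPCvcmp_o to PPCvcmp_rec) makes that naming explicit. The record form is the same encoding with the RC bit set, and it additionally writes CR6 from the comparison outcome (`let Defs = [CR6]; let RC = 1;` in the class body). Below is a minimal standalone sketch of that base/record pairing, using simplified stand-in classes rather than LLVM's real VXRForm_1 and Instruction definitions; it parses with `llvm-tblgen sketch.td`, which dumps the resulting records.

// Standalone sketch of the base/record-form pairing; the "Inst" class and
// its fields are simplified stand-ins, not LLVM's real Instruction class.
class Inst<bits<10> xo, string asm> {
  bits<10> XO = xo;        // extended opcode, e.g. 966 for vcmpbfp
  string AsmString = asm;
  bit RC = 0;              // record bit: 0 = plain form
  list<string> Defs = [];  // implicit register defs
}

// Base form: performs the compare and writes only the vector result.
class VCMP<bits<10> xo, string asm> : Inst<xo, asm>;

// Record form (dot-suffixed mnemonic): same opcode, but RC = 1 and CR6
// is set from the comparison outcome -- the pattern this commit renames
// from VCMPo to VCMP_rec.
class VCMP_rec<bits<10> xo, string asm> : Inst<xo, asm> {
  let RC = 1;
  let Defs = ["CR6"];
}

def VCMPBFP     : VCMP    <966, "vcmpbfp $vD, $vA, $vB">;
def VCMPBFP_rec : VCMP_rec<966, "vcmpbfp. $vD, $vA, $vB">;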