@@ -786,16 +786,6 @@ defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

- def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
-                                (X86lock_add node:$lhs, node:$rhs), [{
-   return hasNoCarryFlagUses(SDValue(N, 0));
- }]>;
-
- def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
-                                (X86lock_sub node:$lhs, node:$rhs), [{
-   return hasNoCarryFlagUses(SDValue(N, 0));
- }]>;
-
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {
let Predicates = [UseIncDec] in {
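
The X86lock_add_nocf/X86lock_sub_nocf fragments touched by this hunk only match when hasNoCarryFlagUses proves the carry flag produced by the locked RMW op is dead; that is what makes rewrites like LOCK ADD/SUB by 1 into LOCK INC/DEC (which leave CF untouched) legal under the UseIncDec block that follows. A minimal standalone sketch of that contract, using invented stand-in types rather than LLVM's SDNode machinery:

#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-ins of my own; the real predicate walks the SDNode's flag users.
enum Flag : uint8_t { CF = 1 << 0, ZF = 1 << 1, SF = 1 << 2, OF = 1 << 3 };

struct FlagsUser {
  uint8_t FlagsRead; // bitmask of EFLAGS bits this consumer inspects
};

// Contract of hasNoCarryFlagUses: true iff no user of the produced EFLAGS
// value reads the carry flag.
bool hasNoCarryFlagUses(const std::vector<FlagsUser> &Users) {
  for (const FlagsUser &U : Users)
    if (U.FlagsRead & CF)
      return false;
  return true;
}

int main() {
  std::vector<FlagsUser> OnlyZeroTest = {{ZF}};        // e.g. a JE afterwards
  std::vector<FlagsUser> CarryConsumer = {{ZF}, {CF}}; // e.g. a later ADC
  std::cout << hasNoCarryFlagUses(OnlyZeroTest) << '\n';  // 1: INC/DEC is safe
  std::cout << hasNoCarryFlagUses(CarryConsumer) << '\n'; // 0: keep ADD/SUB
}
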
@@ -1304,31 +1294,6 @@ def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
- def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
-                              (X86tcret node:$ptr, node:$off), [{
-   // X86tcret args: (*chain, ptr, imm, regs..., glue)
-   unsigned NumRegs = 0;
-   for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
-     if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
-       return false;
-   return true;
- }]>;
-
- def X86tcret_1reg : PatFrag<(ops node:$ptr, node:$off),
-                             (X86tcret node:$ptr, node:$off), [{
-   // X86tcret args: (*chain, ptr, imm, regs..., glue)
-   unsigned NumRegs = 1;
-   const SDValue& BasePtr = cast<LoadSDNode>(N->getOperand(1))->getBasePtr();
-   if (isa<FrameIndexSDNode>(BasePtr))
-     NumRegs = 3;
-   else if (BasePtr->getNumOperands() && isa<GlobalAddressSDNode>(BasePtr->getOperand(0)))
-     NumRegs = 3;
-   for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
-     if (isa<RegisterSDNode>(N->getOperand(i)) && (NumRegs-- == 0))
-       return false;
-   return true;
- }]>;
-
def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
      Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;
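
Both tail-call fragments in this hunk implement the same counting walk: operands 3 onward of the X86tcret node are scanned, and the match fails once too many of them are physical-register operands, so that a volatile register stays free for the call target (the %r11 case the comment describes). X86tcret_1reg just starts with a smaller budget, raised when the folded load's base is a frame index or a global. A standalone sketch of the 6-register walk, with a toy operand list of my own standing in for SDNode operands:

#include <iostream>
#include <vector>

enum class OpKind { Chain, Ptr, Imm, Register, Glue };

// Mirrors the X86tcret_6regs predicate: allow at most six register operands.
bool usesAtMostSixRegs(const std::vector<OpKind> &Ops) {
  unsigned NumRegs = 0;
  // X86tcret args: (chain, ptr, imm, regs..., glue); regs start at index 3.
  for (unsigned i = 3, e = Ops.size(); i != e; ++i)
    if (Ops[i] == OpKind::Register && ++NumRegs > 6)
      return false;
  return true;
}

int main() {
  using K = OpKind;
  std::vector<K> SixRegs = {K::Chain, K::Ptr, K::Imm,
                            K::Register, K::Register, K::Register,
                            K::Register, K::Register, K::Register, K::Glue};
  std::vector<K> SevenRegs = SixRegs;
  SevenRegs.insert(SevenRegs.end() - 1, K::Register); // one more arg register
  std::cout << usesAtMostSixRegs(SixRegs) << '\n';   // 1: match allowed
  std::cout << usesAtMostSixRegs(SevenRegs) << '\n'; // 0: no scratch reg left
}
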
@@ -1449,32 +1414,8 @@ def : Pat<(i64 (anyext GR16:$src)),
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

- // If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
- // instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
- // %ah to the lower byte of a register. By using a MOVSX here we allow a
- // post-isel peephole to merge the two MOVSX instructions into one.
- def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs), [{
-   return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
-           N->getOperand(0).getResNo() == 1);
- }]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

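The anyext_sdiv fragment matches only when the extended value is result 1 (the remainder) of an ISD::SDIVREM node, the one case where the MOVSX trick described in the comment pays off. A toy model of that check, with invented node types standing in for the SelectionDAG ones:

#include <iostream>

enum class Opcode { SDivRem, Add, AnyExt };

struct ToyNode {
  Opcode Opc;
  const ToyNode *Operand0 = nullptr;
  unsigned ResNo0 = 0; // which result of Operand0 feeds this node
};

// True iff the anyext consumes the remainder half of an sdivrem.
bool isAnyExtOfSDivRemRemainder(const ToyNode &AnyExt) {
  return AnyExt.Operand0->Opc == Opcode::SDivRem &&
         AnyExt.ResNo0 == 1; // result 0 = quotient, result 1 = remainder
}

int main() {
  ToyNode Div{Opcode::SDivRem};
  ToyNode ExtRem{Opcode::AnyExt, &Div, 1};
  ToyNode ExtQuot{Opcode::AnyExt, &Div, 0};
  std::cout << isAnyExtOfSDivRemRemainder(ExtRem) << '\n';  // 1: use MOVSX
  std::cout << isAnyExtOfSDivRemRemainder(ExtQuot) << '\n'; // 0: normal anyext
}
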
- // Any instruction that defines a 32-bit result leaves the high half of the
- // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
- // be copying from a truncate. AssertSext/AssertZext/AssertAlign aren't saying
- // anything about the upper 32 bits, they're probably just qualifying a
- // CopyFromReg. FREEZE may be coming from a truncate. Any other 32-bit
- // operation will zero-extend up to 64 bits.
- def def32 : PatLeaf<(i32 GR32:$src), [{
-   return N->getOpcode() != ISD::TRUNCATE &&
-          N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
-          N->getOpcode() != ISD::CopyFromReg &&
-          N->getOpcode() != ISD::AssertSext &&
-          N->getOpcode() != ISD::AssertZext &&
-          N->getOpcode() != ISD::AssertAlign &&
-          N->getOpcode() != ISD::FREEZE;
- }]>;
-
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
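
def32 is effectively an opcode blocklist: a genuine 32-bit x86 operation writes zeros into bits 63:32 of the 64-bit register, so (zext def32:$src) can be selected as a free SUBREG_TO_REG, whereas opcodes that merely forward another value (truncate, subreg extraction, copies, asserts, freeze) prove nothing about the upper half. A minimal sketch of that test over a toy opcode enum of my own:

#include <iostream>

enum class Opc { Add, Truncate, ExtractSubreg, CopyFromReg,
                 AssertSext, AssertZext, AssertAlign, Freeze };

// True iff the defining op is known to zero the upper 32 bits.
bool isDef32(Opc O) {
  switch (O) {
  case Opc::Truncate: case Opc::ExtractSubreg: case Opc::CopyFromReg:
  case Opc::AssertSext: case Opc::AssertZext: case Opc::AssertAlign:
  case Opc::Freeze:
    return false; // may just be forwarding a wider value
  default:
    return true;  // a real 32-bit ALU op zeroes bits 63:32
  }
}

int main() {
  std::cout << isDef32(Opc::Add) << '\n';      // 1: the zext is free
  std::cout << isDef32(Opc::Truncate) << '\n'; // 0: must emit a real zero-extend
}
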
@@ -1492,17 +1433,6 @@ def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

- // Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
- def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs), [{
-   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
-     return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
-
-   KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
-   KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
-   return (~Known0.Zero & ~Known1.Zero) == 0;
- }]>;
-
-
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {
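
or_is_add is the predicate behind the "disjoint bits" pseudos: when no bit position can be one in both operands, or and add produce identical results (no carries can occur), so the selector may pick the ADD form, which, as I understand it, also leaves the option of lowering to LEA open. The fast path asks MaskedValueIsZero for a constant RHS; the general path intersects the possibly-one bits from KnownBits, exactly the (~Known0.Zero & ~Known1.Zero) == 0 test above. A standalone demonstration on concrete known-zero masks:

#include <cstdint>
#include <iostream>

// KnownZero_i marks bits proven zero in operand i; a bit may be one only
// where KnownZero_i is 0. The or is an add iff no bit may be one on both sides.
bool orIsAdd(uint64_t KnownZero0, uint64_t KnownZero1) {
  return (~KnownZero0 & ~KnownZero1) == 0;
}

int main() {
  // Operand 0 is 16-byte aligned (low 4 bits known zero); operand 1 is < 16.
  uint64_t AlignedPtrZeros = 0xF;            // bits 3:0 known zero
  uint64_t SmallIndexZeros = ~UINT64_C(0xF); // bits 63:4 known zero
  std::cout << orIsAdd(AlignedPtrZeros, SmallIndexZeros) << '\n'; // 1: or==add
  std::cout << orIsAdd(0xF, 0xFF) << '\n'; // 0: bits 63:8 may overlap
}
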
@@ -1820,23 +1750,6 @@ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

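These patterns (plus the GR16 one visible in the hunk header) turn a left shift by one into a self-add, a form x86 cores generally execute at least as cheaply as a shift. A quick self-check of the identity they rely on, including unsigned wraparound:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Vals[] = {0u, 1u, 0x7FFFFFFFu, 0xFFFFFFFFu};
  for (uint32_t X : Vals)
    assert((X << 1) == X + X); // (shl x, 1) == (add x, x), wrap included
  return 0;
}
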
- def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
-   return isUnneededShiftMask(N, 3);
- }]>;
-
- def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
-   return isUnneededShiftMask(N, 4);
- }]>;
-
- def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
-   return isUnneededShiftMask(N, 5);
- }]>;
-
- def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
-   return isUnneededShiftMask(N, 6);
- }]>;
-
-
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
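
The shiftMask8/16/32/64 fragments feed the masked-shift patterns this multiclass opens: hardware shifts ignore high bits of the amount, so an explicit (and y, mask) on the amount can be dropped whenever the mask keeps every amount bit the shift actually reads; the Width argument (3, 4, 5, 6) is log2 of the operand width. A toy version of that test for the constant-mask case only (the in-tree isUnneededShiftMask is a C++ helper that can see more than a bare constant):

#include <cstdint>
#include <iostream>

// True iff the low Width bits of Mask are all ones, i.e. the and keeps
// every amount bit a Width-bit shift reads.
bool isUnneededShiftMaskConst(uint64_t Mask, unsigned Width) {
  uint64_t Needed = (UINT64_C(1) << Width) - 1; // Width=5 -> 0x1F for i32
  return (Mask & Needed) == Needed;
}

int main() {
  std::cout << isUnneededShiftMaskConst(31, 5) << '\n';   // 1: (and y, 31) is a no-op
  std::cout << isUnneededShiftMaskConst(0xFF, 5) << '\n'; // 1: superset mask too
  std::cout << isUnneededShiftMaskConst(15, 5) << '\n';   // 0: mask really matters
}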