
[X86][NFC] Reorganize the X86Instr*.td #74454

Merged 1 commit on Dec 6, 2023
10 changes: 0 additions & 10 deletions llvm/lib/Target/X86/X86Instr3DNow.td
@@ -79,16 +79,6 @@ let SchedRW = [WriteEMMS],
def FEMMS : I3DNow<0x0E, RawFrm, (outs), (ins), "femms",
[(int_x86_mmx_femms)]>, TB;

// If PREFETCHWT1 is supported, we want to use it for everything but T0.
def PrefetchWLevel : PatFrag<(ops), (i32 timm), [{
return N->getSExtValue() == 3 || !Subtarget->hasPREFETCHWT1();
}]>;

// Use PREFETCHWT1 for NTA, T2, T1.
def PrefetchWT1Level : TImmLeaf<i32, [{
return Imm < 3;
}]>;
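For context, these two fragments split write-prefetch selection by locality hint. A minimal standalone sketch of that decision logic, in plain C++ rather than LLVM API (selectWritePrefetch is a hypothetical name):

```cpp
#include <cstdio>

// Hypothetical helper mirroring PrefetchWLevel/PrefetchWT1Level above:
// locality hint 3 (T0) always uses PREFETCHW; hints 0-2 (NTA, T2, T1)
// prefer PREFETCHWT1 when the subtarget has it.
const char *selectWritePrefetch(int Locality, bool HasPREFETCHWT1) {
  if (Locality == 3 || !HasPREFETCHWT1)
    return "prefetchw";   // PrefetchWLevel matches
  return "prefetchwt1";   // PrefetchWT1Level matches (Imm < 3)
}

int main() {
  printf("%s\n", selectWritePrefetch(1, true));  // prefetchwt1
  printf("%s\n", selectWritePrefetch(3, true));  // prefetchw
  printf("%s\n", selectWritePrefetch(1, false)); // prefetchw
}
```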

let SchedRW = [WriteLoad] in {
let Predicates = [Has3DNow, NoSSEPrefetch] in
def PREFETCH : I3DNow<0x0D, MRM0m, (outs), (ins i8mem:$addr),
283 changes: 0 additions & 283 deletions llvm/lib/Target/X86/X86InstrAVX512.td

Large diffs are not rendered by default.

24 changes: 0 additions & 24 deletions llvm/lib/Target/X86/X86InstrArithmetic.td
@@ -48,16 +48,6 @@ def PLEA64r : PseudoI<(outs GR64:$dst), (ins anymem:$src), []>;
// Fixed-Register Multiplication and Division Instructions.
//

// SchedModel info for an instruction that loads one value and gets the second
// (and possibly third) value from a register.
// This is used for instructions that put the memory operands before other
// uses.
class SchedLoadReg<X86FoldableSchedWrite Sched> : Sched<[Sched.Folded,
// Memory operand.
ReadDefault, ReadDefault, ReadDefault, ReadDefault, ReadDefault,
// Register reads (implicit or explicit).
Sched.ReadAfterFold, Sched.ReadAfterFold]>;
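The five ReadDefault entries correspond to the five components of an x86 memory operand. A short illustrative sketch of the read list this class builds, assuming the standard base/scale/index/displacement/segment operand layout (plain C++, not LLVM API):

```cpp
#include <cstdio>

// Illustrative only: the scheduling reads assembled by SchedLoadReg. The
// five ReadDefault slots cover the memory operand's five components (base,
// scale, index, displacement, segment); the register operands that follow
// are read after the load is folded.
int main() {
  const char *Reads[] = {"ReadDefault",   "ReadDefault", "ReadDefault",
                         "ReadDefault",   "ReadDefault",    // memory operand
                         "ReadAfterFold", "ReadAfterFold"}; // register reads
  for (const char *R : Reads)
    printf("%s\n", R);
}
```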

// BinOpRR - Binary instructions with inputs "reg, reg".
class BinOpRR<bits<8> opcode, string mnemonic, X86TypeInfo typeinfo,
dag outlist, X86FoldableSchedWrite sched, list<dag> pattern>
@@ -506,17 +496,6 @@ class IMulOpRMI<bits<8> opcode, string mnemonic, X86TypeInfo info,
let ImmT = info.ImmEncoding;
}

def X86add_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86add_flag node:$lhs, node:$rhs), [{
return hasNoCarryFlagUses(SDValue(N, 1));
}]>;

def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86sub_flag node:$lhs, node:$rhs), [{
// Only use DEC if the result is used.
return !SDValue(N, 0).use_empty() && hasNoCarryFlagUses(SDValue(N, 1));
}]>;
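These fragments gate the INC/DEC selection on the carry flag being dead: INC and DEC update every arithmetic flag except CF. A toy model of that legality check, with hypothetical names and no LLVM types:

```cpp
#include <cassert>

// Toy legality check: folding an add/sub of 1 into INC/DEC is sound only
// when no consumer reads CF, because INC/DEC leave CF unchanged. The
// ResultUsed test mirrors the extra use_empty() check in X86sub_flag_nocf.
struct FlagUses {
  bool ReadsCF, ReadsZF, ReadsSF;
};

bool canUseIncDec(const FlagUses &Uses, bool ResultUsed) {
  return ResultUsed && !Uses.ReadsCF;
}

int main() {
  assert(canUseIncDec({/*ReadsCF=*/false, true, true}, /*ResultUsed=*/true));
  assert(!canUseIncDec({/*ReadsCF=*/true, false, false}, /*ResultUsed=*/true));
}
```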

let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst", SchedRW = [WriteALU] in {
// Short forms only valid in 32-bit mode. Selected during MCInst lowering.
@@ -1221,9 +1200,6 @@ def : Pat<(store (X86adc_flag i64relocImmSExt32_su:$src, (load addr:$dst), EFLAG
// generate a result. From an encoding perspective, they are very different:
// they don't have all the usual imm8 and REV forms, and are encoded into a
// different space.
def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
(X86cmp (and_su node:$lhs, node:$rhs), 0)>;
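TEST computes the flags of an AND whose result is never written back, which is exactly what the fragment above expresses. A toy flags model (standalone C++, x86Test is a hypothetical name):

```cpp
#include <cassert>
#include <cstdint>

// Toy model of X86testpat: TEST sets flags as CMP(AND(a, b), 0) would,
// but discards the AND result.
struct Flags {
  bool ZF, SF;
};

Flags x86Test(uint32_t A, uint32_t B) {
  uint32_t R = A & B; // flags-only; never written to a register
  return {R == 0, (int32_t)R < 0};
}

int main() {
  assert(x86Test(0xF0, 0x0F).ZF);    // disjoint bits -> ZF set
  assert(x86Test(1u << 31, ~0u).SF); // sign bit survives the AND -> SF set
}
```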

let isCompare = 1 in {
let Defs = [EFLAGS] in {
let isCommutable = 1 in {
87 changes: 0 additions & 87 deletions llvm/lib/Target/X86/X86InstrCompiler.td
@@ -786,16 +786,6 @@ defm LOCK_OR : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86lock_add node:$lhs, node:$rhs), [{
return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
(X86lock_sub node:$lhs, node:$rhs), [{
return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
SchedRW = [WriteALURMW] in {
let Predicates = [UseIncDec] in {
@@ -1304,31 +1294,6 @@ def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
(X86tcret node:$ptr, node:$off), [{
// X86tcret args: (*chain, ptr, imm, regs..., glue)
unsigned NumRegs = 0;
for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
return false;
return true;
}]>;

def X86tcret_1reg : PatFrag<(ops node:$ptr, node:$off),
(X86tcret node:$ptr, node:$off), [{
// X86tcret args: (*chain, ptr, imm, regs..., glue)
unsigned NumRegs = 1;
const SDValue& BasePtr = cast<LoadSDNode>(N->getOperand(1))->getBasePtr();
if (isa<FrameIndexSDNode>(BasePtr))
NumRegs = 3;
else if (BasePtr->getNumOperands() && isa<GlobalAddressSDNode>(BasePtr->getOperand(0)))
NumRegs = 3;
for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
if (isa<RegisterSDNode>(N->getOperand(i)) && ( NumRegs-- == 0))
return false;
return true;
}]>;
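Both fragments walk the variadic operand tail to bound how many registers the tail call consumes. A compact sketch of the X86tcret_6regs count over a toy operand list (plain C++, no SelectionDAG types):

```cpp
#include <cassert>
#include <vector>

// Toy operand model: true marks a register operand. X86tcret operands are
// (chain, ptr, imm, regs..., glue), so counting starts at index 3.
bool tcretFitsIn6Regs(const std::vector<bool> &IsReg) {
  unsigned NumRegs = 0;
  for (unsigned I = 3, E = IsReg.size(); I != E; ++I)
    if (IsReg[I] && ++NumRegs > 6)
      return false;
  return true;
}

int main() {
  // chain, ptr, imm, then five register operands: acceptable.
  assert(tcretFitsIn6Regs({false, false, false, true, true, true, true, true}));
  // seven register operands: too many volatile registers for a tail call.
  assert(!tcretFitsIn6Regs(
      {false, false, false, true, true, true, true, true, true, true}));
}
```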

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
(TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;
@@ -1449,32 +1414,8 @@ def : Pat<(i64 (anyext GR16:$src)),
def : Pat<(i64 (anyext GR32:$src)),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;
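For background, the 8-bit IDIV leaves its remainder in AH, and consuming it requires sign-extending AH into a full register. A standalone illustration of why a MOVSX-based anyext preserves the remainder (plain C++; the arithmetic mirrors the hardware semantics):

```cpp
#include <cassert>
#include <cstdint>

// x86 IDIV r/m8 produces the quotient in AL and the remainder in AH. Using
// MOVSX (not MOVZX) for the later anyext makes both extends sign extends,
// which is what lets the post-isel peephole merge them into one.
int main() {
  int16_t AX = -7; // dividend
  int8_t Quot = AX / 3, Rem = AX % 3;
  assert(Quot == -2 && Rem == -1); // C and x86 both truncate toward zero
  assert((int32_t)Rem == -1);      // sign extension preserves the value
}
```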

// Any instruction that defines a 32-bit result zeroes the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. AssertSext/AssertZext/AssertAlign aren't saying
// anything about the upper 32 bits, they're probably just qualifying a
// CopyFromReg. FREEZE may be coming from a truncate. Any other 32-bit
// operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
return N->getOpcode() != ISD::TRUNCATE &&
N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
N->getOpcode() != ISD::CopyFromReg &&
N->getOpcode() != ISD::AssertSext &&
N->getOpcode() != ISD::AssertZext &&
N->getOpcode() != ISD::AssertAlign &&
N->getOpcode() != ISD::FREEZE;
}]>;
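def32 leans on the x86-64 rule that writing a 32-bit register implicitly clears bits 63:32. A two-assert standalone demonstration; the C++ cast mirrors what the hardware does for free:

```cpp
#include <cassert>
#include <cstdint>

// On x86-64, any 32-bit register write zeroes the upper half of the full
// 64-bit register, so a zext of a def32 value costs no extra instruction.
int main() {
  uint64_t R = 0xFFFFFFFFFFFFFFFFull;
  R = (uint32_t)(R + 1); // 32-bit operation: upper 32 bits become zero
  assert(R == 0);        // the zero-extension came for free
}
```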

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
@@ -1492,17 +1433,6 @@ def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;
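When no bit can be one in both operands, OR can never carry, so it computes the same value as ADD. A minimal standalone check of that equivalence for fully known (constant) operands:

```cpp
#include <cassert>
#include <cstdint>

// Mirror of or_is_add's known-bits test, specialized to constants: the
// possible-one bits of the two operands must be disjoint.
bool orIsAdd(uint32_t KnownZero0, uint32_t KnownZero1) {
  return (~KnownZero0 & ~KnownZero1) == 0;
}

int main() {
  uint32_t A = 0xFF00, B = 0x00FF; // disjoint bit patterns
  assert(orIsAdd(~A, ~B));         // for a constant, KnownZero is ~value
  assert((A | B) == (A + B));      // hence OR and ADD agree
}
```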


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before the selecting to OR.
let SchedRW = [WriteALU] in {
@@ -1820,23 +1750,6 @@ def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
return isUnneededShiftMask(N, 6);
}]>;
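The widths 3, 4, 5, and 6 are log2 of the shifted type's bit width: a defined shift of an iN value reads only the low log2(N) bits of its amount, so an AND that preserves those bits changes nothing. A quick exhaustive check of the 8-bit case:

```cpp
#include <cassert>
#include <cstdint>

// shiftMask8 lets isel drop (and amt, mask) when the mask keeps the low
// 3 bits: for every defined 8-bit shift amount, (C & 7) == C.
int main() {
  for (uint32_t C = 0; C < 8; ++C) {
    uint8_t X = 0x5A;
    assert((uint8_t)(X << (C & 7)) == (uint8_t)(X << C)); // mask is a no-op
  }
}
```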


// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
// (shift x (and y, 31)) ==> (shift x, y)
121 changes: 0 additions & 121 deletions llvm/lib/Target/X86/X86InstrFPStack.td
@@ -12,127 +12,6 @@
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FPStack specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86Fld : SDTypeProfile<1, 1, [SDTCisFP<0>,
SDTCisPtrTy<1>]>;
def SDTX86Fst : SDTypeProfile<0, 2, [SDTCisFP<0>,
SDTCisPtrTy<1>]>;
def SDTX86Fild : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86Fist : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;

def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86CwdLoad : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86FPEnv : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;

def X86fp80_add : SDNode<"X86ISD::FP80_ADD", SDTFPBinOp, [SDNPCommutative]>;
def X86strict_fp80_add : SDNode<"X86ISD::STRICT_FP80_ADD", SDTFPBinOp,
[SDNPHasChain,SDNPCommutative]>;
def any_X86fp80_add : PatFrags<(ops node:$lhs, node:$rhs),
[(X86strict_fp80_add node:$lhs, node:$rhs),
(X86fp80_add node:$lhs, node:$rhs)]>;

def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fild : SDNode<"X86ISD::FILD", SDTX86Fild,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fist : SDNode<"X86ISD::FIST", SDTX86Fist,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_mem : SDNode<"X86ISD::FP_TO_INT_IN_MEM", SDTX86Fst,
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_cwd_get16 : SDNode<"X86ISD::FNSTCW16m", SDTX86CwdStore,
[SDNPHasChain, SDNPMayStore, SDNPSideEffect,
SDNPMemOperand]>;
def X86fp_cwd_set16 : SDNode<"X86ISD::FLDCW16m", SDTX86CwdLoad,
[SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
SDNPMemOperand]>;
def X86fpenv_get : SDNode<"X86ISD::FNSTENVm", SDTX86FPEnv,
[SDNPHasChain, SDNPMayStore, SDNPSideEffect,
SDNPMemOperand]>;
def X86fpenv_set : SDNode<"X86ISD::FLDENVm", SDTX86FPEnv,
[SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
SDNPMemOperand]>;

def X86fstf32 : PatFrag<(ops node:$val, node:$ptr),
(X86fst node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def X86fstf64 : PatFrag<(ops node:$val, node:$ptr),
(X86fst node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
def X86fstf80 : PatFrag<(ops node:$val, node:$ptr),
(X86fst node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f80;
}]>;

def X86fldf32 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def X86fldf64 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
def X86fldf80 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f80;
}]>;

def X86fild16 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def X86fild32 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def X86fild64 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def X86fist32 : PatFrag<(ops node:$val, node:$ptr),
(X86fist node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def X86fist64 : PatFrag<(ops node:$val, node:$ptr),
(X86fist node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def X86fp_to_i16mem : PatFrag<(ops node:$val, node:$ptr),
(X86fp_to_mem node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def X86fp_to_i32mem : PatFrag<(ops node:$val, node:$ptr),
(X86fp_to_mem node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def X86fp_to_i64mem : PatFrag<(ops node:$val, node:$ptr),
(X86fp_to_mem node:$val, node:$ptr), [{
return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
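All of these fragments dispatch a single opaque memory node to a width-specific instruction purely by its MemoryVT. A toy version of that dispatch (plain C++; names and the returned labels are illustrative, not LLVM instruction names):

```cpp
#include <cstdio>

// Toy MemoryVT dispatch mirroring X86fstf32/X86fstf64/X86fstf80: one
// X86fst node selects a different store form for each memory type.
enum class MemVT { f32, f64, f80 };

const char *selectFpStore(MemVT VT) {
  switch (VT) {
  case MemVT::f32: return "32-bit FP store";
  case MemVT::f64: return "64-bit FP store";
  case MemVT::f80: return "80-bit FP store (x87 only has a popping form)";
  }
  return nullptr;
}

int main() { printf("%s\n", selectFpStore(MemVT::f80)); }
```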

//===----------------------------------------------------------------------===//
// FPStack pattern fragments
//===----------------------------------------------------------------------===//

def fpimm0 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(+0.0);
}]>;

def fpimmneg0 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(-0.0);
}]>;

def fpimm1 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(+1.0);
}]>;

def fpimmneg1 : FPImmLeaf<fAny, [{
return Imm.isExactlyValue(-1.0);
}]>;
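One subtlety behind these leaves: +0.0 and -0.0 compare equal as floats, so an exact-value match such as isExactlyValue must consult the sign bit. A two-assert demonstration:

```cpp
#include <cassert>
#include <cmath>

// fpimm0 and fpimmneg0 must stay distinct even though 0.0 == -0.0; an
// exact bit-pattern test distinguishes them via the sign bit.
int main() {
  assert(0.0 == -0.0);                              // == cannot tell them apart
  assert(std::signbit(-0.0) && !std::signbit(0.0)); // the sign bit can
}
```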

// Some 'special' instructions - expanded after instruction selection.
// Clobbers EFLAGS due to OR instruction used internally.
// FIXME: Can we model this in SelectionDAG?