-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[X86] Use MCRegister in more places. NFC #108682
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Conversation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
@llvm/pr-subscribers-backend-x86 Author: Craig Topper (topperc) — Changes: Patch is 27.37 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/108682.diff — 11 Files Affected:
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 78669784dd035b..000278538e398b 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -47,7 +47,7 @@ struct X86Operand final : public MCParsedAsmOperand {
};
struct RegOp {
- unsigned RegNo;
+ MCRegister RegNo;
};
struct PrefOp {
@@ -60,11 +60,11 @@ struct X86Operand final : public MCParsedAsmOperand {
};
struct MemOp {
- unsigned SegReg;
+ MCRegister SegReg;
const MCExpr *Disp;
- unsigned BaseReg;
- unsigned DefaultBaseReg;
- unsigned IndexReg;
+ MCRegister BaseReg;
+ MCRegister DefaultBaseReg;
+ MCRegister IndexReg;
unsigned Scale;
unsigned Size;
unsigned ModeSize;
@@ -186,19 +186,19 @@ struct X86Operand final : public MCParsedAsmOperand {
assert(Kind == Memory && "Invalid access!");
return Mem.Disp;
}
- unsigned getMemSegReg() const {
+ MCRegister getMemSegReg() const {
assert(Kind == Memory && "Invalid access!");
return Mem.SegReg;
}
- unsigned getMemBaseReg() const {
+ MCRegister getMemBaseReg() const {
assert(Kind == Memory && "Invalid access!");
return Mem.BaseReg;
}
- unsigned getMemDefaultBaseReg() const {
+ MCRegister getMemDefaultBaseReg() const {
assert(Kind == Memory && "Invalid access!");
return Mem.DefaultBaseReg;
}
- unsigned getMemIndexReg() const {
+ MCRegister getMemIndexReg() const {
assert(Kind == Memory && "Invalid access!");
return Mem.IndexReg;
}
@@ -600,8 +600,8 @@ struct X86Operand final : public MCParsedAsmOperand {
void addMaskPairOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- unsigned Reg = getReg();
- switch (Reg) {
+ MCRegister Reg = getReg();
+ switch (Reg.id()) {
case X86::K0:
case X86::K1:
Reg = X86::K0_K1;
@@ -673,11 +673,11 @@ struct X86Operand final : public MCParsedAsmOperand {
}
static std::unique_ptr<X86Operand>
- CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
+ CreateReg(MCRegister Reg, SMLoc StartLoc, SMLoc EndLoc,
bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
StringRef SymName = StringRef(), void *OpDecl = nullptr) {
auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
- Res->Reg.RegNo = RegNo;
+ Res->Reg.RegNo = Reg;
Res->AddressOf = AddressOf;
Res->OffsetOfLoc = OffsetOfLoc;
Res->SymName = SymName;
@@ -718,11 +718,11 @@ struct X86Operand final : public MCParsedAsmOperand {
void *OpDecl = nullptr, unsigned FrontendSize = 0,
bool UseUpRegs = false, bool MaybeDirectBranchDest = true) {
auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
- Res->Mem.SegReg = 0;
+ Res->Mem.SegReg = MCRegister();
Res->Mem.Disp = Disp;
- Res->Mem.BaseReg = 0;
- Res->Mem.DefaultBaseReg = 0;
- Res->Mem.IndexReg = 0;
+ Res->Mem.BaseReg = MCRegister();
+ Res->Mem.DefaultBaseReg = MCRegister();
+ Res->Mem.IndexReg = MCRegister();
Res->Mem.Scale = 1;
Res->Mem.Size = Size;
Res->Mem.ModeSize = ModeSize;
@@ -737,10 +737,10 @@ struct X86Operand final : public MCParsedAsmOperand {
/// Create a generalized memory operand.
static std::unique_ptr<X86Operand>
- CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
- SMLoc EndLoc, unsigned Size = 0,
- unsigned DefaultBaseReg = X86::NoRegister,
+ CreateMem(unsigned ModeSize, MCRegister SegReg, const MCExpr *Disp,
+ MCRegister BaseReg, MCRegister IndexReg, unsigned Scale,
+ SMLoc StartLoc, SMLoc EndLoc, unsigned Size = 0,
+ MCRegister DefaultBaseReg = MCRegister(),
StringRef SymName = StringRef(), void *OpDecl = nullptr,
unsigned FrontendSize = 0, bool UseUpRegs = false,
bool MaybeDirectBranchDest = true) {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
index cb34b56fbb07a7..58b4527af6557b 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ATTInstPrinter.cpp
@@ -516,8 +516,7 @@ void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
void X86ATTInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &OS) {
- const MCOperand &Op = MI->getOperand(OpNo);
- unsigned Reg = Op.getReg();
+ MCRegister Reg = MI->getOperand(OpNo).getReg();
// Override the default printing to print st(0) instead st.
if (Reg == X86::ST0)
markup(OS, Markup::Register) << "%st(0)";
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 82ada2559837e1..87b46a3f55e771 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -262,7 +262,7 @@ static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
if (MemoryOperand < 0)
return false;
unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
- unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
+ MCRegister BaseReg = MI.getOperand(BaseRegNum).getReg();
return (BaseReg == X86::RIP);
}
@@ -302,7 +302,7 @@ uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
if (MemoryOperand != -1)
MemoryOperand += X86II::getOperandBias(Desc);
- unsigned SegmentReg = 0;
+ MCRegister SegmentReg;
if (MemoryOperand >= 0) {
// Check for explicit segment override on memory operand.
SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
@@ -338,7 +338,7 @@ uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
if (MemoryOperand >= 0) {
unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
- unsigned BaseReg = Inst.getOperand(BaseRegNum).getReg();
+ MCRegister BaseReg = Inst.getOperand(BaseRegNum).getReg();
if (BaseReg == X86::ESP || BaseReg == X86::EBP)
return X86::SS_Encoding;
}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index a3af9affa5fd0a..569484704a249f 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -329,8 +329,8 @@ enum EncodingOfSegmentOverridePrefix : uint8_t {
/// Given a segment register, return the encoding of the segment override
/// prefix for it.
inline EncodingOfSegmentOverridePrefix
-getSegmentOverridePrefixForReg(unsigned Reg) {
- switch (Reg) {
+getSegmentOverridePrefixForReg(MCRegister Reg) {
+ switch (Reg.id()) {
default:
llvm_unreachable("Unknown segment register!");
case X86::CS:
@@ -1156,52 +1156,52 @@ inline int getMemoryOperandNo(uint64_t TSFlags) {
}
/// \returns true if the register is a XMM.
-inline bool isXMMReg(unsigned RegNo) {
+inline bool isXMMReg(MCRegister Reg) {
static_assert(X86::XMM15 - X86::XMM0 == 15,
"XMM0-15 registers are not continuous");
static_assert(X86::XMM31 - X86::XMM16 == 15,
"XMM16-31 registers are not continuous");
- return (RegNo >= X86::XMM0 && RegNo <= X86::XMM15) ||
- (RegNo >= X86::XMM16 && RegNo <= X86::XMM31);
+ return (Reg >= X86::XMM0 && Reg <= X86::XMM15) ||
+ (Reg >= X86::XMM16 && Reg <= X86::XMM31);
}
/// \returns true if the register is a YMM.
-inline bool isYMMReg(unsigned RegNo) {
+inline bool isYMMReg(MCRegister Reg) {
static_assert(X86::YMM15 - X86::YMM0 == 15,
"YMM0-15 registers are not continuous");
static_assert(X86::YMM31 - X86::YMM16 == 15,
"YMM16-31 registers are not continuous");
- return (RegNo >= X86::YMM0 && RegNo <= X86::YMM15) ||
- (RegNo >= X86::YMM16 && RegNo <= X86::YMM31);
+ return (Reg >= X86::YMM0 && Reg <= X86::YMM15) ||
+ (Reg >= X86::YMM16 && Reg <= X86::YMM31);
}
/// \returns true if the register is a ZMM.
-inline bool isZMMReg(unsigned RegNo) {
+inline bool isZMMReg(MCRegister Reg) {
static_assert(X86::ZMM31 - X86::ZMM0 == 31,
"ZMM registers are not continuous");
- return RegNo >= X86::ZMM0 && RegNo <= X86::ZMM31;
+ return Reg >= X86::ZMM0 && Reg <= X86::ZMM31;
}
-/// \returns true if \p RegNo is an apx extended register.
-inline bool isApxExtendedReg(unsigned RegNo) {
+/// \returns true if \p Reg is an apx extended register.
+inline bool isApxExtendedReg(MCRegister Reg) {
static_assert(X86::R31WH - X86::R16 == 95, "EGPRs are not continuous");
- return RegNo >= X86::R16 && RegNo <= X86::R31WH;
+ return Reg >= X86::R16 && Reg <= X86::R31WH;
}
/// \returns true if the MachineOperand is a x86-64 extended (r8 or
/// higher) register, e.g. r8, xmm8, xmm13, etc.
-inline bool isX86_64ExtendedReg(unsigned RegNo) {
- if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
- (RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
- (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
- (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
- (RegNo >= X86::ZMM8 && RegNo <= X86::ZMM31))
+inline bool isX86_64ExtendedReg(MCRegister Reg) {
+ if ((Reg >= X86::XMM8 && Reg <= X86::XMM15) ||
+ (Reg >= X86::XMM16 && Reg <= X86::XMM31) ||
+ (Reg >= X86::YMM8 && Reg <= X86::YMM15) ||
+ (Reg >= X86::YMM16 && Reg <= X86::YMM31) ||
+ (Reg >= X86::ZMM8 && Reg <= X86::ZMM31))
return true;
- if (isApxExtendedReg(RegNo))
+ if (isApxExtendedReg(Reg))
return true;
- switch (RegNo) {
+ switch (Reg.id()) {
default:
break;
case X86::R8:
@@ -1299,15 +1299,15 @@ inline bool canUseApxExtendedReg(const MCInstrDesc &Desc) {
/// \returns true if the MemoryOperand is a 32 extended (zmm16 or higher)
/// registers, e.g. zmm21, etc.
-static inline bool is32ExtendedReg(unsigned RegNo) {
- return ((RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
- (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
- (RegNo >= X86::ZMM16 && RegNo <= X86::ZMM31));
+static inline bool is32ExtendedReg(MCRegister Reg) {
+ return ((Reg >= X86::XMM16 && Reg <= X86::XMM31) ||
+ (Reg >= X86::YMM16 && Reg <= X86::YMM31) ||
+ (Reg >= X86::ZMM16 && Reg <= X86::ZMM31));
}
-inline bool isX86_64NonExtLowByteReg(unsigned reg) {
- return (reg == X86::SPL || reg == X86::BPL || reg == X86::SIL ||
- reg == X86::DIL);
+inline bool isX86_64NonExtLowByteReg(MCRegister Reg) {
+ return (Reg == X86::SPL || Reg == X86::BPL || Reg == X86::SIL ||
+ Reg == X86::DIL);
}
/// \returns true if this is a masked instruction.
@@ -1321,7 +1321,7 @@ inline bool isKMergeMasked(uint64_t TSFlags) {
}
/// \returns true if the intruction needs a SIB.
-inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) {
+inline bool needSIB(MCRegister BaseReg, MCRegister IndexReg, bool In64BitMode) {
// The SIB byte must be used if there is an index register.
if (IndexReg)
return true;
@@ -1329,7 +1329,7 @@ inline bool needSIB(unsigned BaseReg, unsigned IndexReg, bool In64BitMode) {
// The SIB byte must be used if the base is ESP/RSP/R12/R20/R28, all of
// which encode to an R/M value of 4, which indicates that a SIB byte is
// present.
- switch (BaseReg) {
+ switch (BaseReg.id()) {
default:
// If there is no base register and we're in 64-bit mode, we need a SIB
// byte to emit an addr that is just 'disp32' (the non-RIP relative form).
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index f97777f6341f3f..ad7fdd7f637732 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -329,7 +329,7 @@ bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
return true;
}
-static bool isARegister(unsigned Reg) {
+static bool isARegister(MCRegister Reg) {
return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX;
}
@@ -364,7 +364,7 @@ bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
unsigned RegOp = IsStore ? 0 : 5;
unsigned AddrOp = AddrBase + 3;
// Check whether the destination register can be fixed.
- unsigned Reg = MI.getOperand(RegOp).getReg();
+ MCRegister Reg = MI.getOperand(RegOp).getReg();
if (!isARegister(Reg))
return false;
// Check whether this is an absolute address.
@@ -436,7 +436,7 @@ static bool optimizeToFixedRegisterForm(MCInst &MI) {
FROM_TO(XOR64ri32, XOR64i32)
}
// Check whether the destination register can be fixed.
- unsigned Reg = MI.getOperand(0).getReg();
+ MCRegister Reg = MI.getOperand(0).getReg();
if (!isARegister(Reg))
return false;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
index 9cc72d32d85f94..66675759ee52d6 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstComments.cpp
@@ -223,14 +223,14 @@ using namespace llvm;
CASE_AVX_INS_COMMON(Inst##SD4, , mr_Int) \
CASE_AVX_INS_COMMON(Inst##SS4, , mr_Int)
-static unsigned getVectorRegSize(unsigned RegNo) {
- if (X86II::isZMMReg(RegNo))
+static unsigned getVectorRegSize(MCRegister Reg) {
+ if (X86II::isZMMReg(Reg))
return 512;
- if (X86II::isYMMReg(RegNo))
+ if (X86II::isYMMReg(Reg))
return 256;
- if (X86II::isXMMReg(RegNo))
+ if (X86II::isXMMReg(Reg))
return 128;
- if (X86::MM0 <= RegNo && RegNo <= X86::MM7)
+ if (Reg >= X86::MM0 && Reg <= X86::MM7)
return 64;
llvm_unreachable("Unknown vector reg!");
@@ -238,7 +238,7 @@ static unsigned getVectorRegSize(unsigned RegNo) {
static unsigned getRegOperandNumElts(const MCInst *MI, unsigned ScalarSize,
unsigned OperandIndex) {
- unsigned OpReg = MI->getOperand(OperandIndex).getReg();
+ MCRegister OpReg = MI->getOperand(OperandIndex).getReg();
return getVectorRegSize(OpReg) / ScalarSize;
}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
index 0e00b4d0d5b160..cd8b9aa6257300 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86IntelInstPrinter.cpp
@@ -487,8 +487,7 @@ void X86IntelInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
void X86IntelInstPrinter::printSTiRegOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &OS) {
- const MCOperand &Op = MI->getOperand(OpNo);
- unsigned Reg = Op.getReg();
+ MCRegister Reg = MI->getOperand(OpNo).getReg();
// Override the default printing to print st(0) instead st.
if (Reg == X86::ST0)
OS << "st(0)";
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 469a385e085271..1bfb080ff50723 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -190,7 +190,7 @@ class X86OpcodePrefixHelper {
setR(getRegEncoding(MI, OpNum));
}
void setX(const MCInst &MI, unsigned OpNum, unsigned Shift = 3) {
- unsigned Reg = MI.getOperand(OpNum).getReg();
+ MCRegister Reg = MI.getOperand(OpNum).getReg();
// X is used to extend vector register only when shift is not 3.
if (Shift != 3 && X86II::isApxExtendedReg(Reg))
return;
@@ -220,7 +220,7 @@ class X86OpcodePrefixHelper {
}
void setM(bool V) { M = V; }
void setXX2(const MCInst &MI, unsigned OpNum) {
- unsigned Reg = MI.getOperand(OpNum).getReg();
+ MCRegister Reg = MI.getOperand(OpNum).getReg();
unsigned Encoding = MRI.getEncodingValue(Reg);
setX(Encoding);
// Index can be a vector register while X2 is used to extend GPR only.
@@ -228,7 +228,7 @@ class X86OpcodePrefixHelper {
setX2(Encoding);
}
void setBB2(const MCInst &MI, unsigned OpNum) {
- unsigned Reg = MI.getOperand(OpNum).getReg();
+ MCRegister Reg = MI.getOperand(OpNum).getReg();
unsigned Encoding = MRI.getEncodingValue(Reg);
setB(Encoding);
// Base can be a vector register while B2 is used to extend GPR only
@@ -243,7 +243,7 @@ class X86OpcodePrefixHelper {
// Only needed with VSIB which don't use VVVV.
if (HasVEX_4V)
return;
- unsigned Reg = MI.getOperand(OpNum).getReg();
+ MCRegister Reg = MI.getOperand(OpNum).getReg();
if (X86II::isApxExtendedReg(Reg))
return;
setV2(MRI.getEncodingValue(Reg));
@@ -614,7 +614,7 @@ void X86MCCodeEmitter::emitMemModRMByte(
const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
- unsigned BaseReg = Base.getReg();
+ MCRegister BaseReg = Base.getReg();
// Handle %rip relative addressing.
if (BaseReg == X86::RIP ||
@@ -746,7 +746,7 @@ void X86MCCodeEmitter::emitMemModRMByte(
// This is the [REG]+disp16 case.
emitByte(modRMByte(2, RegOpcodeField, RMfield), CB);
} else {
- assert(IndexReg.getReg() == 0 && "Unexpected index register!");
+ assert(!IndexReg.getReg() && "Unexpected index register!");
// There is no BaseReg; this is the plain [disp16] case.
emitByte(modRMByte(0, RegOpcodeField, 6), CB);
}
@@ -768,7 +768,7 @@ void X86MCCodeEmitter::emitMemModRMByte(
// Determine whether a SIB byte is needed.
if (!ForceSIB && !X86II::needSIB(BaseReg, IndexReg.getReg(),
STI.hasFeature(X86::Is64Bit))) {
- if (BaseReg == 0) { // [disp32] in X86-32 mode
+ if (!BaseReg) { // [disp32] in X86-32 mode
emitByte(modRMByte(0, RegOpcodeField, 5), CB);
emitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, StartByte, CB, Fixups);
return;
@@ -831,7 +831,7 @@ void X86MCCodeEmitter::emitMemModRMByte(
bool ForceDisp32 = false;
bool ForceDisp8 = false;
int ImmOffset = 0;
- if (BaseReg == 0) {
+ if (!BaseReg) {
// If there is no base register, we emit the special case SIB byte with
// MOD=0, BASE=5, to JUST get the index, scale, and displacement.
BaseRegNo = 5;
@@ -968,7 +968,7 @@ X86MCCodeEmitter::emitVEXOpcodePrefix(int MemOperand, const MCInst &MI,
const MCOperand &MO = MI.getOperand(I);
if (!MO.isReg())
continue;
- unsigned Reg = MO.getReg();
+ MCRegister Reg = MO.getReg();
if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
report_fatal_error(
"Cannot encode high byte register in VEX/EVEX-prefixed instruction");
@@ -1351,7 +1351,7 @@ PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
#ifndef NDEBUG
HasRegOp = true;
#endif
- unsigned Reg = MO.getReg();
+ MCRegister Reg = MO.getReg();
if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
UsesHighByteReg = true;
// If it accesses SPL, BPL, SIL, or DIL, then it requires a REX prefix.
@@ -1449,7 +1449,7 @@ PrefixKind X86MCCodeEmitter::emitREXPrefix(int MemOperand, const MCInst &MI,
void X86MCCodeEmitter::emitSegmentOverridePrefix(
unsigned SegOperand, const MCInst &MI, SmallVectorImpl<char> &CB) const {
// Check for explicit segment override on memory operand.
- if (unsigned Reg = MI.getOperand(SegOperand).getReg())
+ if (MCRegister Reg = MI.getOperand(SegOperand).getReg())
emitByte(X86::getSegmentOverridePrefixForReg(Reg), CB);
}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 07c2c73e84ed9d..fe3c42eeb6e8ec 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -79,8 +79,8 @@ static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];
- return (Base.isReg() && Base.getReg() != 0 && RC.contai...
[truncated]
|
KanRobert
approved these changes
Sep 14, 2024
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
No description provided.