-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[AMDGPU][NFC] Rename the reg-or-imm operand predicates to match their class names. #79439
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
… class names. No need to have two names for the same thing. Also simplifies operand definitions. Part of llvm#62629.
@llvm/pr-subscribers-backend-amdgpu Author: Ivan Kosarev (kosarev) ChangesNo need to have two names for the same thing. Also simplifies operand definitions. Part of <#62629>. Patch is 20.02 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/79439.diff 2 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 9ab657f4e7bb4f8..523228bd0ba7bd0 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -425,11 +425,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isSCSrcB16();
}
- bool isSCSrcB32() const {
+ bool isSCSrc_b32() const {
return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
}
- bool isSCSrcB64() const {
+ bool isSCSrc_b64() const {
return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
}
@@ -451,45 +451,39 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
}
- bool isSSrcB32() const {
- return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
+ bool isSSrc_b32() const {
+ return isSCSrc_b32() || isLiteralImm(MVT::i32) || isExpr();
}
- bool isSSrcB16() const {
- return isSCSrcB16() || isLiteralImm(MVT::i16);
- }
+ bool isSSrc_b16() const { return isSCSrcB16() || isLiteralImm(MVT::i16); }
bool isSSrcV2B16() const {
llvm_unreachable("cannot happen");
- return isSSrcB16();
+ return isSSrc_b16();
}
- bool isSSrcB64() const {
+ bool isSSrc_b64() const {
// TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
// See isVSrc64().
- return isSCSrcB64() || isLiteralImm(MVT::i64);
+ return isSCSrc_b64() || isLiteralImm(MVT::i64);
}
- bool isSSrcF32() const {
- return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
+ bool isSSrc_f32() const {
+ return isSCSrc_b32() || isLiteralImm(MVT::f32) || isExpr();
}
- bool isSSrcF64() const {
- return isSCSrcB64() || isLiteralImm(MVT::f64);
- }
+ bool isSSrcF64() const { return isSCSrc_b64() || isLiteralImm(MVT::f64); }
- bool isSSrcF16() const {
- return isSCSrcB16() || isLiteralImm(MVT::f16);
- }
+ bool isSSrc_f16() const { return isSCSrcB16() || isLiteralImm(MVT::f16); }
bool isSSrcV2F16() const {
llvm_unreachable("cannot happen");
- return isSSrcF16();
+ return isSSrc_f16();
}
bool isSSrcV2FP32() const {
llvm_unreachable("cannot happen");
- return isSSrcF32();
+ return isSSrc_f32();
}
bool isSCSrcV2FP32() const {
@@ -499,20 +493,20 @@ class AMDGPUOperand : public MCParsedAsmOperand {
bool isSSrcV2INT32() const {
llvm_unreachable("cannot happen");
- return isSSrcB32();
+ return isSSrc_b32();
}
bool isSCSrcV2INT32() const {
llvm_unreachable("cannot happen");
- return isSCSrcB32();
+ return isSCSrc_b32();
}
- bool isSSrcOrLdsB32() const {
+ bool isSSrcOrLds_b32() const {
return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
isLiteralImm(MVT::i32) || isExpr();
}
- bool isVCSrcB32() const {
+ bool isVCSrc_b32() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
}
@@ -532,15 +526,13 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::i16);
}
- bool isVCSrcB16() const {
+ bool isVCSrc_b16() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
}
- bool isVCSrcV2B16() const {
- return isVCSrcB16();
- }
+ bool isVCSrc_v2b16() const { return isVCSrc_b16(); }
- bool isVCSrcF32() const {
+ bool isVCSrc_f32() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
}
@@ -560,81 +552,63 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VS_32_Lo128RegClassID, MVT::f16);
}
- bool isVCSrcF16() const {
+ bool isVCSrc_f16() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
}
- bool isVCSrcV2F16() const {
- return isVCSrcF16();
- }
+ bool isVCSrc_v2f16() const { return isVCSrc_f16(); }
- bool isVSrcB32() const {
- return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
+ bool isVSrc_b32() const {
+ return isVCSrc_f32() || isLiteralImm(MVT::i32) || isExpr();
}
- bool isVSrcB64() const {
- return isVCSrcF64() || isLiteralImm(MVT::i64);
- }
+ bool isVSrc_b64() const { return isVCSrcF64() || isLiteralImm(MVT::i64); }
- bool isVSrcTB16() const { return isVCSrcTB16() || isLiteralImm(MVT::i16); }
+ bool isVSrcT_b16() const { return isVCSrcTB16() || isLiteralImm(MVT::i16); }
- bool isVSrcTB16_Lo128() const {
+ bool isVSrcT_b16_Lo128() const {
return isVCSrcTB16_Lo128() || isLiteralImm(MVT::i16);
}
- bool isVSrcFake16B16_Lo128() const {
+ bool isVSrcFake16_b16_Lo128() const {
return isVCSrcFake16B16_Lo128() || isLiteralImm(MVT::i16);
}
- bool isVSrcB16() const {
- return isVCSrcB16() || isLiteralImm(MVT::i16);
- }
+ bool isVSrc_b16() const { return isVCSrc_b16() || isLiteralImm(MVT::i16); }
- bool isVSrcV2B16() const {
- return isVSrcB16() || isLiteralImm(MVT::v2i16);
- }
+ bool isVSrc_v2b16() const { return isVSrc_b16() || isLiteralImm(MVT::v2i16); }
bool isVCSrcV2FP32() const {
return isVCSrcF64();
}
- bool isVSrcV2FP32() const {
- return isVSrcF64() || isLiteralImm(MVT::v2f32);
- }
+ bool isVSrc_v2f32() const { return isVSrc_f64() || isLiteralImm(MVT::v2f32); }
bool isVCSrcV2INT32() const {
return isVCSrcB64();
}
- bool isVSrcV2INT32() const {
- return isVSrcB64() || isLiteralImm(MVT::v2i32);
- }
+ bool isVSrc_v2b32() const { return isVSrc_b64() || isLiteralImm(MVT::v2i32); }
- bool isVSrcF32() const {
- return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
+ bool isVSrc_f32() const {
+ return isVCSrc_f32() || isLiteralImm(MVT::f32) || isExpr();
}
- bool isVSrcF64() const {
- return isVCSrcF64() || isLiteralImm(MVT::f64);
- }
+ bool isVSrc_f64() const { return isVCSrcF64() || isLiteralImm(MVT::f64); }
- bool isVSrcTF16() const { return isVCSrcTF16() || isLiteralImm(MVT::f16); }
+ bool isVSrcT_f16() const { return isVCSrcTF16() || isLiteralImm(MVT::f16); }
- bool isVSrcTF16_Lo128() const {
+ bool isVSrcT_f16_Lo128() const {
return isVCSrcTF16_Lo128() || isLiteralImm(MVT::f16);
}
- bool isVSrcFake16F16_Lo128() const {
+ bool isVSrcFake16_f16_Lo128() const {
return isVCSrcFake16F16_Lo128() || isLiteralImm(MVT::f16);
}
- bool isVSrcF16() const {
- return isVCSrcF16() || isLiteralImm(MVT::f16);
- }
+ bool isVSrc_f16() const { return isVCSrc_f16() || isLiteralImm(MVT::f16); }
- bool isVSrcV2F16() const {
- return isVSrcF16() || isLiteralImm(MVT::v2f16);
- }
+ bool isVSrc_v2f16() const { return isVSrc_f16() || isLiteralImm(MVT::v2f16); }
bool isVISrcB32() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
@@ -660,11 +634,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isVISrcF16() || isVISrcB32();
}
- bool isVISrc_64F16() const {
+ bool isVISrc_64_f16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f16);
}
- bool isVISrc_64B32() const {
+ bool isVISrc_64_b32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32);
}
@@ -672,7 +646,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i64);
}
- bool isVISrc_64F64() const {
+ bool isVISrc_64_f64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f64);
}
@@ -684,11 +658,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32);
}
- bool isVISrc_256B32() const {
+ bool isVISrc_256_b32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32);
}
- bool isVISrc_256F32() const {
+ bool isVISrc_256_f32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f32);
}
@@ -696,7 +670,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i64);
}
- bool isVISrc_256F64() const {
+ bool isVISrc_256_f64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f64);
}
@@ -708,11 +682,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isVISrc_128B16();
}
- bool isVISrc_128B32() const {
+ bool isVISrc_128_b32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i32);
}
- bool isVISrc_128F32() const {
+ bool isVISrc_128_f32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f32);
}
@@ -724,7 +698,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32);
}
- bool isVISrc_512B32() const {
+ bool isVISrc_512_b32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i32);
}
@@ -736,7 +710,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isVISrc_512B16();
}
- bool isVISrc_512F32() const {
+ bool isVISrc_512_f32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f32);
}
@@ -745,10 +719,10 @@ class AMDGPUOperand : public MCParsedAsmOperand {
}
bool isVISrc_512V2F16() const {
- return isVISrc_512F16() || isVISrc_512B32();
+ return isVISrc_512F16() || isVISrc_512_b32();
}
- bool isVISrc_1024B32() const {
+ bool isVISrc_1024_b32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i32);
}
@@ -760,7 +734,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isVISrc_1024B16();
}
- bool isVISrc_1024F32() const {
+ bool isVISrc_1024_f32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f32);
}
@@ -769,7 +743,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
}
bool isVISrc_1024V2F16() const {
- return isVISrc_1024F16() || isVISrc_1024B32();
+ return isVISrc_1024F16() || isVISrc_1024_b32();
}
bool isAISrcB32() const {
@@ -800,11 +774,11 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::i64);
}
- bool isAISrc_64F64() const {
+ bool isAISrc_64_f64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::f64);
}
- bool isAISrc_128B32() const {
+ bool isAISrc_128_b32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i32);
}
@@ -816,7 +790,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isAISrc_128B16();
}
- bool isAISrc_128F32() const {
+ bool isAISrc_128_f32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f32);
}
@@ -825,26 +799,26 @@ class AMDGPUOperand : public MCParsedAsmOperand {
}
bool isAISrc_128V2F16() const {
- return isAISrc_128F16() || isAISrc_128B32();
+ return isAISrc_128F16() || isAISrc_128_b32();
}
- bool isVISrc_128F16() const {
+ bool isVISrc_128_f16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f16);
}
bool isVISrc_128V2F16() const {
- return isVISrc_128F16() || isVISrc_128B32();
+ return isVISrc_128_f16() || isVISrc_128_b32();
}
bool isAISrc_256B64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::i64);
}
- bool isAISrc_256F64() const {
+ bool isAISrc_256_f64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::f64);
}
- bool isAISrc_512B32() const {
+ bool isAISrc_512_b32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i32);
}
@@ -856,7 +830,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isAISrc_512B16();
}
- bool isAISrc_512F32() const {
+ bool isAISrc_512_f32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f32);
}
@@ -865,10 +839,10 @@ class AMDGPUOperand : public MCParsedAsmOperand {
}
bool isAISrc_512V2F16() const {
- return isAISrc_512F16() || isAISrc_512B32();
+ return isAISrc_512F16() || isAISrc_512_b32();
}
- bool isAISrc_1024B32() const {
+ bool isAISrc_1024_b32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i32);
}
@@ -880,7 +854,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
return isAISrc_1024B16();
}
- bool isAISrc_1024F32() const {
+ bool isAISrc_1024_f32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f32);
}
@@ -889,7 +863,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
}
bool isAISrc_1024V2F16() const {
- return isAISrc_1024F16() || isAISrc_1024B32();
+ return isAISrc_1024F16() || isAISrc_1024_b32();
}
bool isKImmFP32() const {
@@ -2115,8 +2089,8 @@ bool AMDGPUOperand::isSDWAInt32Operand() const {
bool AMDGPUOperand::isBoolReg() const {
auto FB = AsmParser->getFeatureBits();
- return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrcB64()) ||
- (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrcB32()));
+ return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrc_b64()) ||
+ (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrc_b32()));
}
uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
@@ -9266,16 +9240,16 @@ unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
case MCK_tfe:
return Operand.isTFE() ? Match_Success : Match_InvalidOperand;
- case MCK_SSrcB32:
+ case MCK_SSrc_b32:
// When operands have expression values, they will return true for isToken,
// because it is not possible to distinguish between a token and an
// expression at parse time. MatchInstructionImpl() will always try to
// match an operand as a token, when isToken returns true, and when the
// name of the expression is not a valid token, the match will fail,
// so we need to handle it here.
- return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
- case MCK_SSrcF32:
- return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
+ return Operand.isSSrc_b32() ? Match_Success : Match_InvalidOperand;
+ case MCK_SSrc_f32:
+ return Operand.isSSrc_f32() ? Match_Success : Match_InvalidOperand;
case MCK_SOPPBrTarget:
return Operand.isSOPPBrTarget() ? Match_Success : Match_InvalidOperand;
case MCK_VReg32OrOff:
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index b50ea2a1db4a738..21e2c6b67b52fd9 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1102,100 +1102,99 @@ class RegImmMatcher<string name> : AsmOperandClass {
}
class RegOrImmOperand <string RegisterClassName, string OperandTypeName,
- string ParserMatchClassName, string decoderImmSize>
+ string decoderImmSize>
: RegisterOperand<!cast<RegisterClass>(RegisterClassName)> {
let OperandNamespace = "AMDGPU";
let OperandType = OperandTypeName;
- let ParserMatchClass = RegImmMatcher<ParserMatchClassName>;
+ let ParserMatchClass = RegImmMatcher<!subst("_Deferred", "", NAME)>;
let DecoderMethod = "decodeOperand_" # RegisterClassName # decoderImmSize;
- }
+}
class RegOrB16 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT16",
- !subst("_b16", "B16", NAME), "_Imm16">;
+ "_Imm16">;
class RegOrF16 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16",
- !subst("_f16", "F16", NAME), "_Imm16">;
+ "_Imm16">;
class RegOrB16T <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT16",
- !subst("_b16", "B16", NAME), "_Imm16"> {
+ "_Imm16"> {
let EncoderMethod = "getMachineOpValueT16";
}
class RegOrF16T <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16",
- !subst("_f16", "F16", NAME), "_Imm16"> {
+ "_Imm16"> {
let EncoderMethod = "getMachineOpValueT16";
}
class RegOrB16_Lo128T <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT16",
- !subst("_b16_Lo128", "B16_Lo128", NAME), "_Imm16"> {
+ "_Imm16"> {
let EncoderMethod = "getMachineOpValueT16Lo128";
}
class RegOrF16_Lo128T <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16",
- !subst("_f16_Lo128", "F16_Lo128", NAME), "_Imm16"> {
+ "_Imm16"> {
let EncoderMethod = "getMachineOpValueT16Lo128";
}
class RegOrB32 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT32",
- !subst("_b32", "B32", NAME), "_Imm32">;
+ "_Imm32">;
class RegOrF32 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP32",
- !subst("_f32", "F32", NAME), "_Imm32">;
+ "_Imm32">;
class RegOrV2B16 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_V2INT16",
- !subst("_v2b16", "V2B16", NAME), "_ImmV2I16">;
+ "_ImmV2I16">;
class RegOrV2F16 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_V2FP16",
- !subst("_v2f16", "V2F16", NAME), "_ImmV2F16">;
+ "_ImmV2F16">;
class RegOrF64 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP64",
- !subst("_f64", "F64", NAME), "_Imm64">;
+ "_Imm64">;
class RegOrB64 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT64",
- !subst("_b64", "B64", NAME), "_Imm64">;
+ "_Imm64">;
class RegOrV2F32 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_V2FP32",
- !subst("_v2f32", "V2FP32", NAME), "_Imm32">;
+ "_Imm32">;
class RegOrV2B32 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_V2INT32",
- !subst("_v2b32", "V2INT32", NAME), "_Imm32">;
+ "_Imm32">;
// For VOP1,2,C True16 instructions. _Lo128 use first 128 32-bit VGPRs only.
class RegOrB16_Lo128 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_INT16",
- !subst("_b16_Lo128", "B16_Lo128", NAME), "_Imm16">;
+ "_Imm16">;
class RegOrF16_Lo128 <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16",
- !subst("_f16_Lo128", "F16_Lo128", NAME), "_Imm16">;
+ "_Imm16">;
// Deferred operands
class RegOrF16_Deferred <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16_DEFERRED",
- !subst("_f16_Deferred", "F16", NAME), "_Deferred_Imm16">;
+ "_Deferred_Imm16">;
class RegOrF32_Deferred <string RegisterClass, string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP32_DEFERRED",
- !subst("_f32_Deferred", "F32", NAME), "_Deferred_Imm32">;
+ "_Deferred_Imm32">;
class RegOrF16_Lo128_Deferred <string RegisterClass,
string OperandTypePrefix>
: RegOrImmOperand <RegisterClass, OperandTypePrefix # "_FP16_DEFERRED",
- !subst("_f16_Lo128_Deferred", "F16_Lo128", NAME),
"_Deferred_Imm16">;
//===------------------------------------------------------...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Makes sense to me
bool isSSrcF64() const {
return isSCSrcB64() || isLiteralImm(MVT::f64);
}
bool isSSrcF64() const { return isSCSrc_b64() || isLiteralImm(MVT::f64); } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why was this not changed to isSSrc_f64, and some others like isVISrcB32?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Only predicates for RegOrImmOperand
operands were renamed, since that's what the patch touches.
No need to have two names for the same thing. Also simplifies operand definitions.
Part of #62629.