-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[RISCV][GISel] Add RISCVPassConfig::getCSEConfig() to match other targets. #110755
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
@llvm/pr-subscribers-backend-risc-v Author: Craig Topper (topperc) Changes: Patch is 595.03 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/110755.diff — 71 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index b9d35a924669f1..2dcac1320417c2 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -19,6 +19,7 @@
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
@@ -374,6 +375,8 @@ class RISCVPassConfig : public TargetPassConfig {
void addPreRegAlloc() override;
void addPostRegAlloc() override;
void addFastRegAlloc() override;
+
+ std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};
} // namespace
@@ -381,6 +384,10 @@ TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
return new RISCVPassConfig(*this, PM);
}
+std::unique_ptr<CSEConfigBase> RISCVPassConfig::getCSEConfig() const {
+ return getStandardCSEConfigForOpt(TM->getOptLevel());
+}
+
FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
// Initialize the global default.
llvm::call_once(InitializeDefaultRVVRegisterAllocatorFlag,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
index 63bc43ae20e7be..51809d00699103 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
@@ -870,31 +870,31 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV32I-NEXT: ADJCALLSTACKDOWN 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV32I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV32I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
- ; RV32I-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
+ ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s32)
+ ; RV32I-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
- ; RV32I-NEXT: $x12 = COPY [[ANYEXT2]](s32)
- ; RV32I-NEXT: $x13 = COPY [[ANYEXT3]](s32)
- ; RV32I-NEXT: $x14 = COPY [[ANYEXT4]](s32)
- ; RV32I-NEXT: $x15 = COPY [[ANYEXT5]](s32)
- ; RV32I-NEXT: $x16 = COPY [[ANYEXT6]](s32)
- ; RV32I-NEXT: $x17 = COPY [[ANYEXT7]](s32)
+ ; RV32I-NEXT: $x12 = COPY [[COPY2]](s32)
+ ; RV32I-NEXT: $x13 = COPY [[ANYEXT2]](s32)
+ ; RV32I-NEXT: $x14 = COPY [[COPY3]](s32)
+ ; RV32I-NEXT: $x15 = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: $x16 = COPY [[COPY4]](s32)
+ ; RV32I-NEXT: $x17 = COPY [[COPY5]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; RV32I-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV32I-NEXT: $x10 = COPY [[ANYEXT9]](s32)
+ ; RV32I-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
+ ; RV32I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV32I-NEXT: $x10 = COPY [[ANYEXT4]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_return_stack2
@@ -910,28 +910,28 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV32IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV32IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV32IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV32IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
- ; RV32IF-NEXT: $f12_f = COPY [[ANYEXT2]](s32)
- ; RV32IF-NEXT: $f13_f = COPY [[ANYEXT3]](s32)
- ; RV32IF-NEXT: $f14_f = COPY [[ANYEXT4]](s32)
- ; RV32IF-NEXT: $f15_f = COPY [[ANYEXT5]](s32)
- ; RV32IF-NEXT: $f16_f = COPY [[ANYEXT6]](s32)
- ; RV32IF-NEXT: $f17_f = COPY [[ANYEXT7]](s32)
- ; RV32IF-NEXT: $x10 = COPY [[ANYEXT8]](s32)
+ ; RV32IF-NEXT: $f12_f = COPY [[COPY2]](s32)
+ ; RV32IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
+ ; RV32IF-NEXT: $f14_f = COPY [[COPY3]](s32)
+ ; RV32IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: $f16_f = COPY [[COPY4]](s32)
+ ; RV32IF-NEXT: $f17_f = COPY [[COPY5]](s32)
+ ; RV32IF-NEXT: $x10 = COPY [[COPY6]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
- ; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
- ; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; RV32IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV32IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
+ ; RV32IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
+ ; RV32IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV32IF-NEXT: $f10_f = COPY [[ANYEXT4]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_return_stack2
@@ -972,31 +972,31 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s16)
- ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
- ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
- ; RV64I-NEXT: G_STORE [[ANYEXT8]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
+ ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s64)
+ ; RV64I-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
- ; RV64I-NEXT: $x12 = COPY [[ANYEXT2]](s64)
- ; RV64I-NEXT: $x13 = COPY [[ANYEXT3]](s64)
- ; RV64I-NEXT: $x14 = COPY [[ANYEXT4]](s64)
- ; RV64I-NEXT: $x15 = COPY [[ANYEXT5]](s64)
- ; RV64I-NEXT: $x16 = COPY [[ANYEXT6]](s64)
- ; RV64I-NEXT: $x17 = COPY [[ANYEXT7]](s64)
+ ; RV64I-NEXT: $x12 = COPY [[COPY2]](s64)
+ ; RV64I-NEXT: $x13 = COPY [[ANYEXT2]](s64)
+ ; RV64I-NEXT: $x14 = COPY [[COPY3]](s64)
+ ; RV64I-NEXT: $x15 = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: $x16 = COPY [[COPY4]](s64)
+ ; RV64I-NEXT: $x17 = COPY [[COPY5]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s64)
- ; RV64I-NEXT: [[ANYEXT9:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
- ; RV64I-NEXT: $x10 = COPY [[ANYEXT9]](s64)
+ ; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
+ ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_return_stack2
@@ -1012,28 +1012,28 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
- ; RV64IF-NEXT: $f12_f = COPY [[ANYEXT2]](s32)
- ; RV64IF-NEXT: $f13_f = COPY [[ANYEXT3]](s32)
- ; RV64IF-NEXT: $f14_f = COPY [[ANYEXT4]](s32)
- ; RV64IF-NEXT: $f15_f = COPY [[ANYEXT5]](s32)
- ; RV64IF-NEXT: $f16_f = COPY [[ANYEXT6]](s32)
- ; RV64IF-NEXT: $f17_f = COPY [[ANYEXT7]](s32)
- ; RV64IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: $x10 = COPY [[ANYEXT8]](s64)
+ ; RV64IF-NEXT: $f12_f = COPY [[COPY2]](s32)
+ ; RV64IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
+ ; RV64IF-NEXT: $f14_f = COPY [[COPY3]](s32)
+ ; RV64IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
+ ; RV64IF-NEXT: $f16_f = COPY [[COPY4]](s32)
+ ; RV64IF-NEXT: $f17_f = COPY [[COPY5]](s32)
+ ; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
- ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
- ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; RV64IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
+ ; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
+ ; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT5]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_return_stack2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index b7eb4574cc4dde..fb00e4860ddf2a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -1091,8 +1091,8 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32I-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; RV32I-NEXT: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
index 6750954a53708b..3a0831b3c5214c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -747,7 +747,7 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64I-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
- ; RV64I-NEXT: [[DEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 3.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
index cbafa76ed4cd42..e64e7866103fff 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
@@ -110,16 +110,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[C]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
- ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[C1]](s32)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ASHR]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[ASHR]]
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR1]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[ASHR]]
- ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY3]], [[ASHR1]]
+ ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY3]], [[ASHR]]
; CHECK-NEXT: $x10 = COPY [[XOR]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
index 81da754b7ecc52..d188f89eef6f1f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
@@ -14,16 +14,15 @@ body: |
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_ZEXT]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s64)
- ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
- ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
- ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C2]](s64)
- ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_ZEXT]](s64)
- ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC1]], [[ASHR1]]
+ ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s64)
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+ ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C1]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR1]]
; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR1]]
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
- ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C3]]
+ ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C2]]
; RV64I-NEXT: $x10 = COPY [[AND]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
@@ -56,8 +55,8 @@ body: |
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[TRUNC]], [[C]](s64)
- ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
- ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD ...
[truncated]
|
@llvm/pr-subscribers-llvm-globalisel Author: Craig Topper (topperc) Changes: Patch is 595.03 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/110755.diff — 71 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index b9d35a924669f1..2dcac1320417c2 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -19,6 +19,7 @@
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
@@ -374,6 +375,8 @@ class RISCVPassConfig : public TargetPassConfig {
void addPreRegAlloc() override;
void addPostRegAlloc() override;
void addFastRegAlloc() override;
+
+ std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};
} // namespace
@@ -381,6 +384,10 @@ TargetPassConfig *RISCVTargetMachine::createPassConfig(PassManagerBase &PM) {
return new RISCVPassConfig(*this, PM);
}
+std::unique_ptr<CSEConfigBase> RISCVPassConfig::getCSEConfig() const {
+ return getStandardCSEConfigForOpt(TM->getOptLevel());
+}
+
FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
// Initialize the global default.
llvm::call_once(InitializeDefaultRVVRegisterAllocatorFlag,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
index 63bc43ae20e7be..51809d00699103 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
@@ -870,31 +870,31 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV32I-NEXT: ADJCALLSTACKDOWN 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV32I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV32I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
- ; RV32I-NEXT: G_STORE [[ANYEXT8]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
+ ; RV32I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s32)
+ ; RV32I-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into stack, align 16)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
- ; RV32I-NEXT: $x12 = COPY [[ANYEXT2]](s32)
- ; RV32I-NEXT: $x13 = COPY [[ANYEXT3]](s32)
- ; RV32I-NEXT: $x14 = COPY [[ANYEXT4]](s32)
- ; RV32I-NEXT: $x15 = COPY [[ANYEXT5]](s32)
- ; RV32I-NEXT: $x16 = COPY [[ANYEXT6]](s32)
- ; RV32I-NEXT: $x17 = COPY [[ANYEXT7]](s32)
+ ; RV32I-NEXT: $x12 = COPY [[COPY2]](s32)
+ ; RV32I-NEXT: $x13 = COPY [[ANYEXT2]](s32)
+ ; RV32I-NEXT: $x14 = COPY [[COPY3]](s32)
+ ; RV32I-NEXT: $x15 = COPY [[ANYEXT3]](s32)
+ ; RV32I-NEXT: $x16 = COPY [[COPY4]](s32)
+ ; RV32I-NEXT: $x17 = COPY [[COPY5]](s32)
; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; RV32I-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV32I-NEXT: $x10 = COPY [[ANYEXT9]](s32)
+ ; RV32I-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
+ ; RV32I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV32I-NEXT: $x10 = COPY [[ANYEXT4]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
; RV32IF-LABEL: name: caller_half_return_stack2
@@ -910,28 +910,28 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV32IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV32IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV32IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV32IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV32IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV32IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV32IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV32IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV32IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV32IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
- ; RV32IF-NEXT: $f12_f = COPY [[ANYEXT2]](s32)
- ; RV32IF-NEXT: $f13_f = COPY [[ANYEXT3]](s32)
- ; RV32IF-NEXT: $f14_f = COPY [[ANYEXT4]](s32)
- ; RV32IF-NEXT: $f15_f = COPY [[ANYEXT5]](s32)
- ; RV32IF-NEXT: $f16_f = COPY [[ANYEXT6]](s32)
- ; RV32IF-NEXT: $f17_f = COPY [[ANYEXT7]](s32)
- ; RV32IF-NEXT: $x10 = COPY [[ANYEXT8]](s32)
+ ; RV32IF-NEXT: $f12_f = COPY [[COPY2]](s32)
+ ; RV32IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
+ ; RV32IF-NEXT: $f14_f = COPY [[COPY3]](s32)
+ ; RV32IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
+ ; RV32IF-NEXT: $f16_f = COPY [[COPY4]](s32)
+ ; RV32IF-NEXT: $f17_f = COPY [[COPY5]](s32)
+ ; RV32IF-NEXT: $x10 = COPY [[COPY6]](s32)
; RV32IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV32IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
- ; RV32IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
- ; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; RV32IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV32IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
+ ; RV32IF-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV32IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
+ ; RV32IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV32IF-NEXT: $f10_f = COPY [[ANYEXT4]](s32)
; RV32IF-NEXT: PseudoRET implicit $f10_f
;
; RV32IZFH-LABEL: name: caller_half_return_stack2
@@ -972,31 +972,31 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64I-NEXT: ADJCALLSTACKDOWN 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s16)
- ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
- ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[ANYEXT5:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT6:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT7:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64I-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s16)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ANYEXT]](s64)
+ ; RV64I-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
- ; RV64I-NEXT: G_STORE [[ANYEXT8]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
+ ; RV64I-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C2]](s64)
+ ; RV64I-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 16)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
- ; RV64I-NEXT: $x12 = COPY [[ANYEXT2]](s64)
- ; RV64I-NEXT: $x13 = COPY [[ANYEXT3]](s64)
- ; RV64I-NEXT: $x14 = COPY [[ANYEXT4]](s64)
- ; RV64I-NEXT: $x15 = COPY [[ANYEXT5]](s64)
- ; RV64I-NEXT: $x16 = COPY [[ANYEXT6]](s64)
- ; RV64I-NEXT: $x17 = COPY [[ANYEXT7]](s64)
+ ; RV64I-NEXT: $x12 = COPY [[COPY2]](s64)
+ ; RV64I-NEXT: $x13 = COPY [[ANYEXT2]](s64)
+ ; RV64I-NEXT: $x14 = COPY [[COPY3]](s64)
+ ; RV64I-NEXT: $x15 = COPY [[ANYEXT3]](s64)
+ ; RV64I-NEXT: $x16 = COPY [[COPY4]](s64)
+ ; RV64I-NEXT: $x17 = COPY [[COPY5]](s64)
; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s64)
- ; RV64I-NEXT: [[ANYEXT9:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
- ; RV64I-NEXT: $x10 = COPY [[ANYEXT9]](s64)
+ ; RV64I-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64I-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
+ ; RV64I-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64I-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64IF-LABEL: name: caller_half_return_stack2
@@ -1012,28 +1012,28 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64IF-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s16)
- ; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
- ; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s16)
+ ; RV64IF-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+ ; RV64IF-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; RV64IF-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
+ ; RV64IF-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ANYEXT3]](s32)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
- ; RV64IF-NEXT: $f12_f = COPY [[ANYEXT2]](s32)
- ; RV64IF-NEXT: $f13_f = COPY [[ANYEXT3]](s32)
- ; RV64IF-NEXT: $f14_f = COPY [[ANYEXT4]](s32)
- ; RV64IF-NEXT: $f15_f = COPY [[ANYEXT5]](s32)
- ; RV64IF-NEXT: $f16_f = COPY [[ANYEXT6]](s32)
- ; RV64IF-NEXT: $f17_f = COPY [[ANYEXT7]](s32)
- ; RV64IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
- ; RV64IF-NEXT: $x10 = COPY [[ANYEXT8]](s64)
+ ; RV64IF-NEXT: $f12_f = COPY [[COPY2]](s32)
+ ; RV64IF-NEXT: $f13_f = COPY [[ANYEXT2]](s32)
+ ; RV64IF-NEXT: $f14_f = COPY [[COPY3]](s32)
+ ; RV64IF-NEXT: $f15_f = COPY [[ANYEXT3]](s32)
+ ; RV64IF-NEXT: $f16_f = COPY [[COPY4]](s32)
+ ; RV64IF-NEXT: $f17_f = COPY [[COPY5]](s32)
+ ; RV64IF-NEXT: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $x10 = COPY [[ANYEXT4]](s64)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
- ; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
- ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; RV64IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
+ ; RV64IF-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
+ ; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT5]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_return_stack2
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index b7eb4574cc4dde..fb00e4860ddf2a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -1091,8 +1091,8 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32I-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; RV32I-NEXT: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
; RV32I-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 3
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
index 6750954a53708b..3a0831b3c5214c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -747,7 +747,7 @@ define %struct.large2 @callee_large_struct_ret2() nounwind {
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64I-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
- ; RV64I-NEXT: [[DEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[DEF]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 2
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 3.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
index cbafa76ed4cd42..e64e7866103fff 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
@@ -110,16 +110,14 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[C]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
- ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[C1]](s32)
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ASHR]]
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[ASHR]]
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR1]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR]]
; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY2]], [[ASHR]]
- ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY3]], [[ASHR1]]
+ ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY3]], [[ASHR]]
; CHECK-NEXT: $x10 = COPY [[XOR]](s32)
; CHECK-NEXT: $x11 = COPY [[XOR1]](s32)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
index 81da754b7ecc52..d188f89eef6f1f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
@@ -14,16 +14,15 @@ body: |
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_ZEXT]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s64)
- ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
- ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
- ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
- ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C2]](s64)
- ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_ZEXT]](s64)
- ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC1]], [[ASHR1]]
+ ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s64)
+ ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 7
+ ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C1]](s64)
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
+ ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[ASHR1]]
; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR1]]
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
- ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
- ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C3]]
+ ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C2]]
; RV64I-NEXT: $x10 = COPY [[AND]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
@@ -56,8 +55,8 @@ body: |
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[TRUNC]], [[C]](s64)
- ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
- ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD ...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I wonder if we should enable this by default for all targets in TargetPassConfig.
No description provided.