[RISCV][GISel] Port AddiPair optimization #120463
Conversation
@llvm/pr-subscribers-backend-risc-v
@llvm/pr-subscribers-llvm-globalisel

Author: Luke Quinn (lquinn2015)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/120463.diff

3 Files Affected:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 985264c591e105..5aa66b3780b86b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -137,6 +137,11 @@ class RISCVInstructionSelector : public InstructionSelector {
void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
const MachineInstr &MI, int OpIdx) const;
+ void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
+ int OpIdx) const;
+ void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
+ int OpIdx) const;
+
const RISCVSubtarget &STI;
const RISCVInstrInfo &TII;
const RISCVRegisterInfo &TRI;
@@ -871,6 +876,25 @@ void RISCVInstructionSelector::renderXLenSubTrailingOnes(
MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}
+void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
+ const MachineInstr &MI,
+ int OpIdx) const {
+ assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
+ "Expected G_CONSTANT");
+ int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
+ int64_t Adj = Imm < 0 ? -2048 : 2047;
+ MIB.addImm(Imm - Adj);
+}
+
+void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
+ const MachineInstr &MI,
+ int OpIdx) const {
+ assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
+ "Expected G_CONSTANT");
+ int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
+ MIB.addImm(Imm);
+}
+
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
LLT Ty, const RegisterBank &RB) const {
if (RB.getID() == RISCV::GPRBRegBankID) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 24bf7da4dadc07..ac9805ded9d307 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -454,7 +454,14 @@ def AddiPair : PatLeaf<(imm), [{
// The immediate operand must be in range [-4096,-2049] or [2048,4094].
int64_t Imm = N->getSExtValue();
return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
-}]>;
+}]> {
+ let GISelPredicateCode = [{
+ if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
+ return false;
+ int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
+ return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
+ }];
+}
// Return imm - (imm < 0 ? -2048 : 2047).
def AddiPairImmSmall : SDNodeXForm<imm, [{
@@ -463,6 +470,8 @@ def AddiPairImmSmall : SDNodeXForm<imm, [{
return CurDAG->getSignedTargetConstant(Imm - Adj, SDLoc(N),
N->getValueType(0));
}]>;
+def GIAddiPairImmSmall : GICustomOperandRenderer<"renderAddiPairImmSmall">,
+ GISDNodeXFormEquiv<AddiPairImmSmall>;
// Return -2048 if immediate is negative or 2047 if positive. These are the
// largest simm12 values.
@@ -470,6 +479,8 @@ def AddiPairImmLarge : SDNodeXForm<imm, [{
int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
return CurDAG->getSignedTargetConstant(Imm, SDLoc(N), N->getValueType(0));
}]>;
+def GIAddiPairImmLarge : GICustomOperandRenderer<"renderAddiPairImmLarge">,
+ GISDNodeXFormEquiv<AddiPairImmLarge>;
def TrailingZeros : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
@@ -537,7 +548,6 @@ def LeadingOnesWMask : PatLeaf<(imm), [{
return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
Imm != UINT64_C(0xffffffff);
}];
-
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
new file mode 100644
index 00000000000000..ff56ab193c480c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
@@ -0,0 +1,247 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel < %s \
+; RUN: | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -global-isel < %s \
+; RUN: | FileCheck %s -check-prefix=RV64I
+
+; These test how the immediate in an addition is materialized.
+
+define i32 @add_positive_low_bound_reject(i32 %a) nounwind {
+; RV32I-LABEL: add_positive_low_bound_reject:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_positive_low_bound_reject:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 2047
+ ret i32 %1
+}
+
+define i32 @add_positive_low_bound_accept(i32 %a) nounwind {
+; RV32I-LABEL: add_positive_low_bound_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_positive_low_bound_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 2048
+ ret i32 %1
+}
+
+define i32 @add_positive_high_bound_accept(i32 %a) nounwind {
+; RV32I-LABEL: add_positive_high_bound_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_positive_high_bound_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 4094
+ ret i32 %1
+}
+
+define i32 @add_positive_high_bound_reject(i32 %a) nounwind {
+; RV32I-LABEL: add_positive_high_bound_reject:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 1
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_positive_high_bound_reject:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1
+; RV64I-NEXT: addiw a1, a1, -1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 4095
+ ret i32 %1
+}
+
+define i32 @add_negative_high_bound_reject(i32 %a) nounwind {
+; RV32I-LABEL: add_negative_high_bound_reject:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -2048
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_negative_high_bound_reject:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: ret
+ %1 = add i32 %a, -2048
+ ret i32 %1
+}
+
+define i32 @add_negative_high_bound_accept(i32 %a) nounwind {
+; RV32I-LABEL: add_negative_high_bound_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -2048
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_negative_high_bound_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: ret
+ %1 = add i32 %a, -2049
+ ret i32 %1
+}
+
+define i32 @add_negative_low_bound_accept(i32 %a) nounwind {
+; RV32I-LABEL: add_negative_low_bound_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, -2048
+; RV32I-NEXT: addi a0, a0, -2048
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_negative_low_bound_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: ret
+ %1 = add i32 %a, -4096
+ ret i32 %1
+}
+
+define i32 @add_negative_low_bound_reject(i32 %a) nounwind {
+; RV32I-LABEL: add_negative_low_bound_reject:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 1048575
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add_negative_low_bound_reject:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, 1048575
+; RV64I-NEXT: addiw a1, a1, -1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+ %1 = add i32 %a, -4097
+ ret i32 %1
+}
+
+define i32 @add32_accept(i32 %a) nounwind {
+; RV32I-LABEL: add32_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: addi a0, a0, 952
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add32_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addi a0, a0, 952
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 2999
+ ret i32 %1
+}
+
+define signext i32 @add32_sext_accept(i32 signext %a) nounwind {
+; RV32I-LABEL: add32_sext_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: addi a0, a0, 952
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add32_sext_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addiw a0, a0, 952
+; RV64I-NEXT: ret
+ %1 = add i32 %a, 2999
+ ret i32 %1
+}
+
+@gv0 = global i32 0, align 4
+define signext i32 @add32_sext_reject_on_rv64(i32 signext %a) nounwind {
+; RV32I-LABEL: add32_sext_reject_on_rv64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, %hi(gv0)
+; RV32I-NEXT: addi a0, a0, 2047
+; RV32I-NEXT: addi a0, a0, 953
+; RV32I-NEXT: sw a0, %lo(gv0)(a1)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add32_sext_reject_on_rv64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a1, %hi(gv0)
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addiw a0, a0, 953
+; RV64I-NEXT: sw a0, %lo(gv0)(a1)
+; RV64I-NEXT: ret
+ %b = add nsw i32 %a, 3000
+ store i32 %b, ptr @gv0, align 4
+ ret i32 %b
+}
+
+define i64 @add64_accept(i64 %a) nounwind {
+; RV32I-LABEL: add64_accept:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 1
+; RV32I-NEXT: addi a2, a2, -1097
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: sltu a2, a0, a2
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add64_accept:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addi a0, a0, 952
+; RV64I-NEXT: ret
+ %1 = add i64 %a, 2999
+ ret i64 %1
+}
+
+@ga = global i32 0, align 4
+@gb = global i32 0, align 4
+define void @add32_reject() nounwind {
+; RV32I-LABEL: add32_reject:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a0, %hi(ga)
+; RV32I-NEXT: lui a1, %hi(gb)
+; RV32I-NEXT: lw a2, %lo(ga)(a0)
+; RV32I-NEXT: lw a3, %lo(gb)(a1)
+; RV32I-NEXT: lui a4, 1
+; RV32I-NEXT: addi a4, a4, -1096
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: add a3, a3, a4
+; RV32I-NEXT: sw a2, %lo(ga)(a0)
+; RV32I-NEXT: sw a3, %lo(gb)(a1)
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: add32_reject:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a0, %hi(ga)
+; RV64I-NEXT: lui a1, %hi(gb)
+; RV64I-NEXT: lw a2, %lo(ga)(a0)
+; RV64I-NEXT: lw a3, %lo(gb)(a1)
+; RV64I-NEXT: lui a4, 1
+; RV64I-NEXT: addi a4, a4, -1096
+; RV64I-NEXT: add a2, a2, a4
+; RV64I-NEXT: add a3, a3, a4
+; RV64I-NEXT: sw a2, %lo(ga)(a0)
+; RV64I-NEXT: sw a3, %lo(gb)(a1)
+; RV64I-NEXT: ret
+ %1 = load i32, ptr @ga, align 4
+ %2 = load i32, ptr @gb, align 4
+ %3 = add i32 %1, 3000
+ %4 = add i32 %2, 3000
+ store i32 %3, ptr @ga, align 4
+ store i32 %4, ptr @gb, align 4
+ ret void
+}
You can diff add-imm.ll and GISel/add-imm.ll: they differ, but I believe both are correct. The only difference seems to be that the GISel path favors addiw over addi; in this test case that makes no difference because of the values of the constants. @benshi001 or @topperc, I had one question about the add64_accept test. I don't really understand what the sltu instruction is for in that case, but both GISel and SelectionDAG ISel emit it. It seems like it shouldn't be there in either, or am I missing an overflow-check semantic?
That test does 64-bit arithmetic. On RV32, the registers are only 32 bits wide, so we have to split the addition. The sltu determines whether there was a carry out of the low half: RISC-V doesn't have flags, so we have to detect the carry manually by checking whether the result of the low-half add is smaller than one of the inputs.
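For reference, here is a small standalone C++ sketch of that carry check (the helper name add64_on_rv32 is made up for illustration, not code from LLVM); it covers the case where the high half of the immediate is zero, as in add64_accept:

#include <cstdint>

// Add a 64-bit value held in two 32-bit halves to an immediate whose high
// half is zero, the way the RV32 lowering splits it: add the low halves,
// then detect the carry with an unsigned compare (this is what sltu does).
uint64_t add64_on_rv32(uint32_t lo, uint32_t hi, uint32_t imm_lo) {
  uint32_t sum_lo = lo + imm_lo;      // add  a0, a0, a2
  uint32_t carry = sum_lo < imm_lo;   // sltu a2, a0, a2  (carry out of low half)
  uint32_t sum_hi = hi + carry;       // add  a1, a1, a2
  return ((uint64_t)sum_hi << 32) | sum_lo;
}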
Could you add a pre-commit to this PR with the test file so we can see how the optimization changes things?
Please add a description of the optimization in the PR description.
Signed-off-by: Luke Quinn <[email protected]>
Force-pushed from 2c21ea6 to 48b3c2a
ditto
LGTM
Porting the SelectionDAG AddiPair optimization to GlobalISel:
Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1), where imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0 as large as possible and imm1 as small as possible so that we might be able to use c.addi for the small immediate.
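As a rough illustration of the split, here is a standalone C++ sketch that mirrors what AddiPairImmLarge and AddiPairImmSmall (and the corresponding renderAddiPairImm* renderers above) compute; the function name splitAddiPair is hypothetical and not part of the patch:

#include <cassert>
#include <cstdint>
#include <utility>

// Split an immediate in [-4096,-2049] or [2048,4094] into two simm12 values
// (imm0, imm1) with imm == imm0 + imm1. imm0 takes the largest simm12 of the
// matching sign (2047 or -2048), so imm1 is the small remainder and the
// second ADDI may be compressible to c.addi.
std::pair<int64_t, int64_t> splitAddiPair(int64_t Imm) {
  assert(((Imm >= -4096 && Imm <= -2049) || (Imm >= 2048 && Imm <= 4094)) &&
         "immediate outside the AddiPair range");
  int64_t Imm0 = Imm < 0 ? -2048 : 2047; // AddiPairImmLarge
  int64_t Imm1 = Imm - Imm0;             // AddiPairImmSmall
  return {Imm0, Imm1};
}

// Example: splitAddiPair(2999) == {2047, 952}, matching the two addi
// instructions emitted in the add32_accept test above.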