[RISCV] Enable Zbb ANDN/ORN/XNOR for more 64-bit constants #122698


Merged · 1 commit · Jan 14, 2025
7 changes: 4 additions & 3 deletions llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3216,17 +3216,18 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
 bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
   if (!isa<ConstantSDNode>(N))
     return false;
 
   int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
-  if ((Imm & 0xfff) != 0xfff || Imm == -1)
+
+  // For 32-bit signed constants, we can only substitute LUI+ADDI with LUI.
+  if (isInt<32>(Imm) && ((Imm & 0xfff) != 0xfff || Imm == -1))
     return false;
 
   // Abandon this transform if the constant is needed elsewhere.
   for (const SDNode *U : N->users()) {
     if (!ISD::isBitwiseLogicOp(U->getOpcode()))
       return false;
   }
 
   // For 32-bit signed constants we already know it's a win: LUI+ADDI vs LUI.
   // For 64-bit constants, the instruction sequences get complex,
   // so we select inverted only if it's cheaper.
   if (!isInt<32>(Imm)) {
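
For context on why the selector now compares costs: a 64-bit mask like 0xFF000000000000FF takes an li+slli+addi chain to build, while its complement 0x00FFFFFFFFFFFF00 collapses to lui+srli, which andn/orn/xnor can then consume directly. Below is a minimal standalone sketch of that observation; `fitsLuiSrli` is a simplified stand-in for the real RISCVMatInt cost query, not the in-tree implementation, and the sample constant is taken from the andnofff test.

```cpp
#include <cstdint>
#include <cstdio>

// Minimal sketch, not the in-tree RISCVMatInt logic: can a 64-bit value
// be materialized as LUI followed by SRLI?  LUI produces a value whose
// low 12 bits are zero, sign-extended from bit 31; SRLI then shifts the
// set bits down into place.
static bool fitsLuiSrli(uint64_t V) {
  for (unsigned S = 1; S < 64; ++S) {
    if (V >> (64 - S))   // shifting left by S would drop set bits
      continue;
    uint64_t Lui = V << S;
    bool Low12Zero = (Lui & 0xfff) == 0;
    bool SExt32 = (int64_t)Lui == (int64_t)(int32_t)Lui;
    if (Low12Zero && SExt32)
      return true;       // LUI loads the high bits; SRLI by S recovers V
  }
  return false;
}

int main() {
  // The constant from the andnofff test: and i64 %x, -72057594037927681.
  uint64_t Imm = 0xFF000000000000FFULL;
  printf("Imm  fits LUI+SRLI: %d\n", fitsLuiSrli(Imm));  // 0: needs li+slli+addi
  printf("~Imm fits LUI+SRLI: %d\n", fitsLuiSrli(~Imm)); // 1: so ANDN wins
}
```

The in-tree check is stricter: per the comment in the hunk above, it compares full materialization costs of Imm and ~Imm and selects the inverted form only when it is actually cheaper.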
21 changes: 9 additions & 12 deletions llvm/test/CodeGen/RISCV/zbb-logic-neg-imm.ll
@@ -330,10 +330,9 @@ define i64 @andnofff(i64 %x) {
 ;
 ; RV64-LABEL: andnofff:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 56
-; RV64-NEXT: addi a1, a1, 255
-; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: lui a1, 1048560
+; RV64-NEXT: srli a1, a1, 8
+; RV64-NEXT: andn a0, a0, a1
 ; RV64-NEXT: ret
 %and = and i64 %x, -72057594037927681
 ret i64 %and
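
Checking the new sequence by hand: lui a1, 1048560 places 0xFFFF0 in bits 31:12 and sign-extends, giving 0xFFFFFFFFFFFF0000; srli a1, a1, 8 yields 0x00FFFFFFFFFFFF00, exactly the complement of the AND mask 0xFF000000000000FF (-72057594037927681). Since andn computes a0 & ~a1, the four-instruction li/slli/addi/and sequence becomes three instructions.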
@@ -349,10 +348,9 @@ define i64 @ornofff(i64 %x) {
 ;
 ; NOZBS64-LABEL: ornofff:
 ; NOZBS64: # %bb.0:
-; NOZBS64-NEXT: li a1, -1
-; NOZBS64-NEXT: slli a1, a1, 63
-; NOZBS64-NEXT: addi a1, a1, 2047
-; NOZBS64-NEXT: or a0, a0, a1
+; NOZBS64-NEXT: lui a1, 1048575
+; NOZBS64-NEXT: srli a1, a1, 1
+; NOZBS64-NEXT: orn a0, a0, a1
 ; NOZBS64-NEXT: ret
 ;
 ; ZBS32-LABEL: ornofff:
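
The same pattern holds for orn, which computes a0 | ~a1: lui a1, 1048575 materializes 0xFFFFFFFFFFFFF000, srli by 1 gives 0x7FFFFFFFFFFFF800, and its complement 0x80000000000007FF (that is, -2^63 + 2047) is the original OR mask.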
@@ -380,10 +378,9 @@ define i64 @xornofff(i64 %x) {
 ;
 ; RV64-LABEL: xornofff:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slli a1, a1, 60
-; RV64-NEXT: addi a1, a1, 255
-; RV64-NEXT: xor a0, a0, a1
+; RV64-NEXT: lui a1, 1048575
+; RV64-NEXT: srli a1, a1, 4
+; RV64-NEXT: xnor a0, a0, a1
 ; RV64-NEXT: ret
 %xor = xor i64 %x, -1152921504606846721
 ret i64 %xor
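
As a compile-time sanity check on all three rewrites, the snippet below re-derives each mask from its lui/srli pair and asserts the bitwise identities behind andn (a & ~b), orn (a | ~b), and xnor (~(a ^ b)). The helper `lui_srli` and the probe value X are illustrative, not part of the tests.

```cpp
#include <cstdint>

// Illustrative helper: the value that LUI hi20 followed by SRLI sh builds.
constexpr uint64_t lui_srli(uint64_t Hi20, unsigned Sh) {
  uint64_t L = Hi20 << 12;               // LUI places the 20-bit immediate
  if (L & 0x80000000ULL)
    L |= 0xFFFFFFFF00000000ULL;          // ...sign-extended from bit 31
  return L >> Sh;                        // SRLI shifts the ones into place
}

constexpr uint64_t X = 0x0123456789ABCDEFULL; // arbitrary probe value

// andnofff: and i64 %x, -72057594037927681 (0xFF000000000000FF)
static_assert((X & 0xFF000000000000FFULL) == (X & ~lui_srli(1048560, 8)),
              "andn rewrite");
// ornofff: or with 0x80000000000007FF
static_assert((X | 0x80000000000007FFULL) == (X | ~lui_srli(1048575, 1)),
              "orn rewrite");
// xornofff: xor i64 %x, -1152921504606846721 (0xF0000000000000FF)
static_assert((X ^ 0xF0000000000000FFULL) == ~(X ^ lui_srli(1048575, 4)),
              "xnor rewrite");

int main() {} // compiles only if all three identities hold
```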