[RISCV] Add new tests for RISCV zicond extension #143580

Merged
merged 1 commit, Jun 12, 2025
289 changes: 289 additions & 0 deletions llvm/test/CodeGen/RISCV/zicond-opts.ll
@@ -0,0 +1,289 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -O2 -verify-machineinstrs -mattr=+b,+zicond < %s | FileCheck %s -check-prefix=RV32ZICOND
; RUN: llc -mtriple=riscv64 -O2 -verify-machineinstrs -mattr=+b,+zicond < %s | FileCheck %s -check-prefix=RV64ZICOND

; (and (icmp x, 0, ne), (icmp y, 0, ne)) -> (czero.eqz (icmp x, 0, ne), y)
define i32 @icmp_and(i64 %x, i64 %y) {
; RV32ZICOND-LABEL: icmp_and:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: or a2, a2, a3
; RV32ZICOND-NEXT: or a0, a0, a1
; RV32ZICOND-NEXT: snez a1, a2
; RV32ZICOND-NEXT: snez a0, a0
; RV32ZICOND-NEXT: and a0, a0, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: icmp_and:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: snez a1, a1
; RV64ZICOND-NEXT: snez a0, a0
; RV64ZICOND-NEXT: and a0, a0, a1
; RV64ZICOND-NEXT: ret
%3 = icmp ne i64 %y, 0
%4 = icmp ne i64 %x, 0
%5 = and i1 %4, %3
%6 = zext i1 %5 to i32
ret i32 %6
}

; (and (and (icmp x, 0, ne), (icmp y, 0, ne)), (icmp z, 0, ne)) -> (czero.eqz (czero.eqz (icmp x, 0, ne), y), z)
define i32 @icmp_and_and(i64 %x, i64 %y, i64 %z) {
; RV32ZICOND-LABEL: icmp_and_and:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: or a2, a2, a3
; RV32ZICOND-NEXT: or a0, a0, a1
; RV32ZICOND-NEXT: or a4, a4, a5
; RV32ZICOND-NEXT: snez a1, a2
; RV32ZICOND-NEXT: snez a0, a0
; RV32ZICOND-NEXT: and a0, a1, a0
; RV32ZICOND-NEXT: snez a1, a4
; RV32ZICOND-NEXT: and a0, a1, a0
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: icmp_and_and:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: snez a1, a1
; RV64ZICOND-NEXT: snez a0, a0
; RV64ZICOND-NEXT: and a0, a1, a0
; RV64ZICOND-NEXT: snez a1, a2
; RV64ZICOND-NEXT: and a0, a1, a0
; RV64ZICOND-NEXT: ret
%4 = icmp ne i64 %y, 0
%5 = icmp ne i64 %x, 0
%6 = and i1 %4, %5
%7 = icmp ne i64 %z, 0
%8 = and i1 %7, %6
%9 = zext i1 %8 to i32
ret i32 %9
}

; (select cond, x, rotl(x, rot.amt)) -> (rotl x, (czero_nez rot.amt, cond))
define i64 @rotate_l_nez(i64 %x, i64 %rot.amt, i1 %cond) {
; RV32ZICOND-LABEL: rotate_l_nez:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: bexti a3, a2, 5
; RV32ZICOND-NEXT: not a5, a2
; RV32ZICOND-NEXT: czero.nez a6, a1, a3
; RV32ZICOND-NEXT: czero.eqz a7, a0, a3
; RV32ZICOND-NEXT: czero.nez t0, a0, a3
; RV32ZICOND-NEXT: czero.eqz a3, a1, a3
; RV32ZICOND-NEXT: czero.eqz a0, a0, a4
; RV32ZICOND-NEXT: czero.eqz a1, a1, a4
; RV32ZICOND-NEXT: or a6, a7, a6
; RV32ZICOND-NEXT: or a3, a3, t0
; RV32ZICOND-NEXT: sll a7, a6, a2
; RV32ZICOND-NEXT: srli t0, a3, 1
; RV32ZICOND-NEXT: sll a2, a3, a2
; RV32ZICOND-NEXT: srli a3, a6, 1
; RV32ZICOND-NEXT: srl a6, t0, a5
; RV32ZICOND-NEXT: srl a3, a3, a5
; RV32ZICOND-NEXT: or a5, a7, a6
; RV32ZICOND-NEXT: or a2, a2, a3
; RV32ZICOND-NEXT: czero.nez a2, a2, a4
; RV32ZICOND-NEXT: czero.nez a3, a5, a4
; RV32ZICOND-NEXT: or a0, a0, a2
; RV32ZICOND-NEXT: or a1, a1, a3
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: rotate_l_nez:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: rol a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a1, a1, a2
; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
; RV64ZICOND-NEXT: or a0, a0, a1
; RV64ZICOND-NEXT: ret
%6 = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %rot.amt)
%7 = select i1 %cond, i64 %x, i64 %6
ret i64 %7
}

; (select cond, rotl(x, rot.amt), x) -> (rotl x, (czero_eqz rot.amt, cond))
define i64 @rotate_l_eqz(i64 %x, i64 %rot.amt, i1 %cond) {
; RV32ZICOND-LABEL: rotate_l_eqz:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: bexti a3, a2, 5
; RV32ZICOND-NEXT: not a5, a2
; RV32ZICOND-NEXT: czero.nez a6, a1, a3
; RV32ZICOND-NEXT: czero.eqz a7, a0, a3
; RV32ZICOND-NEXT: czero.nez t0, a0, a3
; RV32ZICOND-NEXT: czero.eqz a3, a1, a3
; RV32ZICOND-NEXT: czero.nez a0, a0, a4
; RV32ZICOND-NEXT: czero.nez a1, a1, a4
; RV32ZICOND-NEXT: or a6, a7, a6
; RV32ZICOND-NEXT: or a3, a3, t0
; RV32ZICOND-NEXT: sll a7, a6, a2
; RV32ZICOND-NEXT: srli t0, a3, 1
; RV32ZICOND-NEXT: sll a2, a3, a2
; RV32ZICOND-NEXT: srli a3, a6, 1
; RV32ZICOND-NEXT: srl a6, t0, a5
; RV32ZICOND-NEXT: srl a3, a3, a5
; RV32ZICOND-NEXT: or a5, a7, a6
; RV32ZICOND-NEXT: or a2, a2, a3
; RV32ZICOND-NEXT: czero.eqz a2, a2, a4
; RV32ZICOND-NEXT: czero.eqz a3, a5, a4
; RV32ZICOND-NEXT: or a0, a2, a0
; RV32ZICOND-NEXT: or a1, a3, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: rotate_l_eqz:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: rol a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: czero.eqz a1, a1, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%6 = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %rot.amt)
%7 = select i1 %cond, i64 %6, i64 %x
ret i64 %7
}

; (select cond, const, t) -> (add (czero_nez t - const, cond), const)
define i64 @select_imm_reg(i64 %t, i1 %cond) {
; RV32ZICOND-LABEL: select_imm_reg:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: li a3, 3
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a3, a3, a2
; RV32ZICOND-NEXT: or a0, a3, a0
; RV32ZICOND-NEXT: czero.nez a1, a1, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: select_imm_reg:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a1, a1, 1
; RV64ZICOND-NEXT: li a2, 3
; RV64ZICOND-NEXT: czero.nez a0, a0, a1
; RV64ZICOND-NEXT: czero.eqz a1, a2, a1
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%4 = select i1 %cond, i64 3, i64 %t
ret i64 %4
}

; (select cond, t, const) -> (add (czero_eqz t - const, cond), const)
define i64 @select_reg_imm(i64 %t, i1 %cond) {
; RV32ZICOND-LABEL: select_reg_imm:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: li a3, 3
; RV32ZICOND-NEXT: czero.nez a3, a3, a2
; RV32ZICOND-NEXT: czero.eqz a0, a0, a2
; RV32ZICOND-NEXT: or a0, a0, a3
; RV32ZICOND-NEXT: czero.eqz a1, a1, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: select_reg_imm:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a1, a1, 1
; RV64ZICOND-NEXT: li a2, 3
; RV64ZICOND-NEXT: czero.nez a2, a2, a1
; RV64ZICOND-NEXT: czero.eqz a0, a0, a1
; RV64ZICOND-NEXT: or a0, a0, a2
; RV64ZICOND-NEXT: ret
%4 = select i1 %cond, i64 %t, i64 3
ret i64 %4
}

; (select cond, -2048, t) -> (xor (czero_nez (xor t, -2048), cond), -2048)
define i64 @select_imm_reg_neg_2048(i64 %t, i1 %cond) {
; RV32ZICOND-LABEL: select_imm_reg_neg_2048:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: li a3, -2048
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a3, a3, a2
; RV32ZICOND-NEXT: neg a2, a2
; RV32ZICOND-NEXT: or a0, a3, a0
; RV32ZICOND-NEXT: or a1, a2, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: select_imm_reg_neg_2048:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a1, a1, 1
; RV64ZICOND-NEXT: li a2, -2048
; RV64ZICOND-NEXT: czero.nez a0, a0, a1
; RV64ZICOND-NEXT: czero.eqz a1, a2, a1
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%4 = select i1 %cond, i64 -2048, i64 %t
ret i64 %4
}

; (select cond, 2048, t) -> no transform
define i64 @select_imm_reg_2048(i64 %t, i1 %cond) {
; RV32ZICOND-LABEL: select_imm_reg_2048:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a2, a2, 1
; RV32ZICOND-NEXT: bseti a3, zero, 11
; RV32ZICOND-NEXT: czero.nez a0, a0, a2
; RV32ZICOND-NEXT: czero.eqz a3, a3, a2
; RV32ZICOND-NEXT: or a0, a3, a0
; RV32ZICOND-NEXT: czero.nez a1, a1, a2
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: select_imm_reg_2048:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a1, a1, 1
; RV64ZICOND-NEXT: bseti a2, zero, 11
; RV64ZICOND-NEXT: czero.nez a0, a0, a1
; RV64ZICOND-NEXT: czero.eqz a1, a2, a1
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%4 = select i1 %cond, i64 2048, i64 %t
ret i64 %4
}

; (select cond, (and f, ~x), f) -> (andn f, (czero_eqz x, cond))
define i64 @test_inv_and_nez(i64 %f, i64 %x, i1 %cond) {
; RV32ZICOND-LABEL: test_inv_and_nez:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: andi a4, a4, 1
; RV32ZICOND-NEXT: addi a4, a4, -1
; RV32ZICOND-NEXT: orn a3, a4, a3
; RV32ZICOND-NEXT: orn a2, a4, a2
; RV32ZICOND-NEXT: and a0, a2, a0
; RV32ZICOND-NEXT: and a1, a3, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: test_inv_and_nez:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: andn a1, a0, a1
; RV64ZICOND-NEXT: czero.nez a0, a0, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%5 = xor i64 %x, -1
%6 = select i1 %cond, i64 %5, i64 -1
%7 = and i64 %6, %f
ret i64 %7
}

; (select cond, f, (and f, ~x)) -> (andn f, (czero_nez x, cond))
define i64 @test_inv_and_eqz(i64 %f, i64 %x, i1 %cond) {
; RV32ZICOND-LABEL: test_inv_and_eqz:
; RV32ZICOND: # %bb.0:
; RV32ZICOND-NEXT: slli a4, a4, 31
; RV32ZICOND-NEXT: srai a4, a4, 31
; RV32ZICOND-NEXT: orn a3, a4, a3
; RV32ZICOND-NEXT: orn a2, a4, a2
; RV32ZICOND-NEXT: and a0, a2, a0
; RV32ZICOND-NEXT: and a1, a3, a1
; RV32ZICOND-NEXT: ret
;
; RV64ZICOND-LABEL: test_inv_and_eqz:
; RV64ZICOND: # %bb.0:
; RV64ZICOND-NEXT: andi a2, a2, 1
; RV64ZICOND-NEXT: andn a1, a0, a1
; RV64ZICOND-NEXT: czero.eqz a0, a0, a2
; RV64ZICOND-NEXT: or a0, a1, a0
; RV64ZICOND-NEXT: ret
%5 = xor i64 %x, -1
%6 = select i1 %cond, i64 -1, i64 %5
%7 = and i64 %6, %f
ret i64 %7
}
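
The tests above lean on the two Zicond conditional-zero instructions: `czero.eqz rd, rs1, rs2` writes 0 when rs2 is zero and rs1 otherwise, while `czero.nez rd, rs1, rs2` writes 0 when rs2 is nonzero and rs1 otherwise. As a minimal sketch (not part of this patch), the C model below mirrors that semantics and checks the branchless-select identity the CHECK lines use, plus the constant-select identity recorded in the comment above `select_imm_reg`; the helper names `czero_eqz`/`czero_nez` are illustrative only.

```c
#include <assert.h>
#include <stdint.h>

/* czero.eqz rd, rs1, rs2: rd = (rs2 == 0) ? 0 : rs1 */
static uint64_t czero_eqz(uint64_t rs1, uint64_t rs2) {
  return rs2 == 0 ? 0 : rs1;
}

/* czero.nez rd, rs1, rs2: rd = (rs2 != 0) ? 0 : rs1 */
static uint64_t czero_nez(uint64_t rs1, uint64_t rs2) {
  return rs2 != 0 ? 0 : rs1;
}

int main(void) {
  uint64_t t = 0x1234, c = 3; /* arbitrary sample values */

  for (uint64_t cond = 0; cond <= 1; cond++) {
    /* Branchless select used by the generated code:
       (select cond, c, t) == czero.nez(t, cond) | czero.eqz(c, cond),
       since exactly one of the two operands is zeroed out. */
    uint64_t sel = czero_nez(t, cond) | czero_eqz(c, cond);
    assert(sel == (cond ? c : t));

    /* Identity behind the select_imm_reg comment:
       (select cond, const, t) == czero.nez(t - const, cond) + const. */
    uint64_t folded = czero_nez(t - c, cond) + c;
    assert(folded == (cond ? c : t));
  }
  return 0;
}
```

Note that the autogenerated CHECK lines capture current codegen, which does not always use the tighter form written in the one-line comment above each test; those comments record the fold each test is meant to exercise.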