[DAG] SimplifyMultipleUseDemandedBits - bypass ADD nodes if either operand is zero #112588
Conversation
@llvm/pr-subscribers-backend-aarch64 @llvm/pr-subscribers-backend-powerpc

Author: Simon Pilgrim (RKSimon)

Changes

The dpbusd_const.ll test change is due to us losing the expanded add reduction pattern, as one of the elements is known to be zero (removing one of the adds from the reduction pyramid). I don't think it's of concern.

Noticed while working on #107423

Patch is 57.65 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/112588.diff

18 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 40f030d7b936f7..403018534a2040 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -793,6 +793,16 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
return Op.getOperand(1);
break;
}
+ case ISD::ADD: {
+ RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+ if (RHSKnown.isZero())
+ return Op.getOperand(0);
+
+ LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ if (LHSKnown.isZero())
+ return Op.getOperand(1);
+ break;
+ }
case ISD::SHL: {
// If we are only demanding sign bits then we can use the shift source
// directly.
diff --git a/llvm/test/CodeGen/AArch64/srem-lkk.ll b/llvm/test/CodeGen/AArch64/srem-lkk.ll
index 5ff178937ebbfb..1223ae3a15e7bc 100644
--- a/llvm/test/CodeGen/AArch64/srem-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/srem-lkk.ll
@@ -4,14 +4,14 @@
define i32 @fold_srem_positive_odd(i32 %x) {
; CHECK-LABEL: fold_srem_positive_odd:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #37253
+; CHECK-NEXT: mov w8, #37253 // =0x9185
; CHECK-NEXT: movk w8, #44150, lsl #16
; CHECK-NEXT: smull x8, w0, w8
; CHECK-NEXT: lsr x8, x8, #32
; CHECK-NEXT: add w8, w8, w0
; CHECK-NEXT: asr w9, w8, #6
; CHECK-NEXT: add w8, w9, w8, lsr #31
-; CHECK-NEXT: mov w9, #95
+; CHECK-NEXT: mov w9, #95 // =0x5f
; CHECK-NEXT: msub w0, w8, w9, w0
; CHECK-NEXT: ret
%1 = srem i32 %x, 95
@@ -22,13 +22,12 @@ define i32 @fold_srem_positive_odd(i32 %x) {
define i32 @fold_srem_positive_even(i32 %x) {
; CHECK-LABEL: fold_srem_positive_even:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #36849
+; CHECK-NEXT: mov w8, #36849 // =0x8ff1
+; CHECK-NEXT: mov w9, #1060 // =0x424
; CHECK-NEXT: movk w8, #15827, lsl #16
; CHECK-NEXT: smull x8, w0, w8
-; CHECK-NEXT: lsr x9, x8, #63
; CHECK-NEXT: asr x8, x8, #40
-; CHECK-NEXT: add w8, w8, w9
-; CHECK-NEXT: mov w9, #1060
+; CHECK-NEXT: add w8, w8, w8, lsr #31
; CHECK-NEXT: msub w0, w8, w9, w0
; CHECK-NEXT: ret
%1 = srem i32 %x, 1060
@@ -39,13 +38,12 @@ define i32 @fold_srem_positive_even(i32 %x) {
define i32 @fold_srem_negative_odd(i32 %x) {
; CHECK-LABEL: fold_srem_negative_odd:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #65445
+; CHECK-NEXT: mov w8, #65445 // =0xffa5
+; CHECK-NEXT: mov w9, #-723 // =0xfffffd2d
; CHECK-NEXT: movk w8, #42330, lsl #16
; CHECK-NEXT: smull x8, w0, w8
-; CHECK-NEXT: lsr x9, x8, #63
; CHECK-NEXT: asr x8, x8, #40
-; CHECK-NEXT: add w8, w8, w9
-; CHECK-NEXT: mov w9, #-723
+; CHECK-NEXT: add w8, w8, w8, lsr #31
; CHECK-NEXT: msub w0, w8, w9, w0
; CHECK-NEXT: ret
%1 = srem i32 %x, -723
@@ -56,13 +54,12 @@ define i32 @fold_srem_negative_odd(i32 %x) {
define i32 @fold_srem_negative_even(i32 %x) {
; CHECK-LABEL: fold_srem_negative_even:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #62439
+; CHECK-NEXT: mov w8, #62439 // =0xf3e7
+; CHECK-NEXT: mov w9, #-22981 // =0xffffa63b
; CHECK-NEXT: movk w8, #64805, lsl #16
; CHECK-NEXT: smull x8, w0, w8
-; CHECK-NEXT: lsr x9, x8, #63
; CHECK-NEXT: asr x8, x8, #40
-; CHECK-NEXT: add w8, w8, w9
-; CHECK-NEXT: mov w9, #-22981
+; CHECK-NEXT: add w8, w8, w8, lsr #31
; CHECK-NEXT: msub w0, w8, w9, w0
; CHECK-NEXT: ret
%1 = srem i32 %x, -22981
@@ -74,14 +71,14 @@ define i32 @fold_srem_negative_even(i32 %x) {
define i32 @combine_srem_sdiv(i32 %x) {
; CHECK-LABEL: combine_srem_sdiv:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #37253
+; CHECK-NEXT: mov w8, #37253 // =0x9185
; CHECK-NEXT: movk w8, #44150, lsl #16
; CHECK-NEXT: smull x8, w0, w8
; CHECK-NEXT: lsr x8, x8, #32
; CHECK-NEXT: add w8, w8, w0
; CHECK-NEXT: asr w9, w8, #6
; CHECK-NEXT: add w8, w9, w8, lsr #31
-; CHECK-NEXT: mov w9, #95
+; CHECK-NEXT: mov w9, #95 // =0x5f
; CHECK-NEXT: msub w9, w8, w9, w0
; CHECK-NEXT: add w0, w9, w8
; CHECK-NEXT: ret
@@ -95,14 +92,14 @@ define i32 @combine_srem_sdiv(i32 %x) {
define i64 @dont_fold_srem_i64(i64 %x) {
; CHECK-LABEL: dont_fold_srem_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov x8, #58849
+; CHECK-NEXT: mov x8, #58849 // =0xe5e1
; CHECK-NEXT: movk x8, #48148, lsl #16
; CHECK-NEXT: movk x8, #33436, lsl #32
; CHECK-NEXT: movk x8, #21399, lsl #48
; CHECK-NEXT: smulh x8, x0, x8
; CHECK-NEXT: asr x9, x8, #5
; CHECK-NEXT: add x8, x9, x8, lsr #63
-; CHECK-NEXT: mov w9, #98
+; CHECK-NEXT: mov w9, #98 // =0x62
; CHECK-NEXT: msub x0, x8, x9, x0
; CHECK-NEXT: ret
%1 = srem i64 %x, 98
diff --git a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
index a74f0c86fe1859..b165ac0d56d20f 100644
--- a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
@@ -263,16 +263,14 @@ define <2 x i32> @fold_srem_v2i32(<2 x i32> %x) {
; CHECK-LABEL: fold_srem_v2i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #26215 // =0x6667
-; CHECK-NEXT: movi v3.2s, #10
+; CHECK-NEXT: movi v2.2s, #10
; CHECK-NEXT: movk w8, #26214, lsl #16
; CHECK-NEXT: dup v1.2s, w8
; CHECK-NEXT: smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT: ushr v2.2d, v1.2d, #63
; CHECK-NEXT: sshr v1.2d, v1.2d, #34
-; CHECK-NEXT: xtn v2.2s, v2.2d
; CHECK-NEXT: xtn v1.2s, v1.2d
-; CHECK-NEXT: add v1.2s, v1.2s, v2.2s
-; CHECK-NEXT: mls v0.2s, v1.2s, v3.2s
+; CHECK-NEXT: usra v1.2s, v1.2s, #31
+; CHECK-NEXT: mls v0.2s, v1.2s, v2.2s
; CHECK-NEXT: ret
%1 = srem <2 x i32> %x, <i32 10, i32 10>
ret <2 x i32> %1
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index 1622f498dce65a..122fb6242569c6 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -39,8 +39,8 @@ define amdgpu_kernel void @srem_i16_7(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_readfirstlane_b32 s0, v0
; TAHITI-NEXT: s_mulk_i32 s0, 0x4925
-; TAHITI-NEXT: s_lshr_b32 s1, s0, 31
; TAHITI-NEXT: s_ashr_i32 s0, s0, 17
+; TAHITI-NEXT: s_bfe_u32 s1, s0, 0x1000f
; TAHITI-NEXT: s_add_i32 s0, s0, s1
; TAHITI-NEXT: s_mul_i32 s0, s0, 7
; TAHITI-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
@@ -72,7 +72,7 @@ define amdgpu_kernel void @srem_i16_7(ptr addrspace(1) %out, ptr addrspace(1) %i
; EG: ; %bb.0:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: TEX 0 @6
-; EG-NEXT: ALU 22, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: ALU 23, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: CF_END
; EG-NEXT: PAD
@@ -85,10 +85,11 @@ define amdgpu_kernel void @srem_i16_7(ptr addrspace(1) %out, ptr addrspace(1) %i
; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
; EG-NEXT: MULLO_INT * T0.Y, PV.W, literal.x,
; EG-NEXT: 18725(2.623931e-41), 0(0.000000e+00)
-; EG-NEXT: ASHR T0.W, PS, literal.x,
-; EG-NEXT: LSHR * T1.W, PS, literal.y,
-; EG-NEXT: 17(2.382207e-44), 31(4.344025e-44)
-; EG-NEXT: ADD_INT * T0.W, PV.W, PS,
+; EG-NEXT: ASHR * T0.W, PS, literal.x,
+; EG-NEXT: 17(2.382207e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_UINT * T1.W, PV.W, literal.x, 1,
+; EG-NEXT: 15(2.101948e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, T0.W, PV.W,
; EG-NEXT: MULLO_INT * T0.Y, PV.W, literal.x,
; EG-NEXT: 7(9.809089e-45), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
diff --git a/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll b/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
index 0171e27e80901d..ae23520094db67 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-32bit-build-vector.ll
@@ -8,51 +8,38 @@
define dso_local fastcc void @BuildVectorICE() unnamed_addr {
; 32BIT-LABEL: BuildVectorICE:
; 32BIT: # %bb.0: # %entry
-; 32BIT-NEXT: stwu 1, -64(1)
-; 32BIT-NEXT: .cfi_def_cfa_offset 64
-; 32BIT-NEXT: li 4, .LCPI0_0@l
-; 32BIT-NEXT: lis 5, .LCPI0_0@ha
+; 32BIT-NEXT: stwu 1, -48(1)
+; 32BIT-NEXT: .cfi_def_cfa_offset 48
; 32BIT-NEXT: lxvw4x 34, 0, 3
-; 32BIT-NEXT: li 3, 0
-; 32BIT-NEXT: addi 6, 1, 48
-; 32BIT-NEXT: li 7, 0
-; 32BIT-NEXT: lxvw4x 35, 5, 4
+; 32BIT-NEXT: li 5, 0
+; 32BIT-NEXT: addi 3, 1, 16
; 32BIT-NEXT: addi 4, 1, 32
-; 32BIT-NEXT: addi 5, 1, 16
-; 32BIT-NEXT: .p2align 4
+; 32BIT-NEXT: xxspltw 35, 34, 1
+; 32BIT-NEXT: .p2align 5
; 32BIT-NEXT: .LBB0_1: # %while.body
; 32BIT-NEXT: #
-; 32BIT-NEXT: stw 3, 32(1)
-; 32BIT-NEXT: stw 7, 16(1)
-; 32BIT-NEXT: lxvw4x 36, 0, 4
-; 32BIT-NEXT: lxvw4x 37, 0, 5
-; 32BIT-NEXT: vperm 4, 5, 4, 3
+; 32BIT-NEXT: stw 5, 16(1)
+; 32BIT-NEXT: lxvw4x 36, 0, 3
; 32BIT-NEXT: vadduwm 4, 2, 4
-; 32BIT-NEXT: xxspltw 37, 36, 1
-; 32BIT-NEXT: vadduwm 4, 4, 5
-; 32BIT-NEXT: stxvw4x 36, 0, 6
-; 32BIT-NEXT: lwz 7, 48(1)
+; 32BIT-NEXT: vadduwm 4, 4, 3
+; 32BIT-NEXT: stxvw4x 36, 0, 4
+; 32BIT-NEXT: lwz 5, 32(1)
; 32BIT-NEXT: b .LBB0_1
;
; 64BIT-LABEL: BuildVectorICE:
; 64BIT: # %bb.0: # %entry
; 64BIT-NEXT: lxvw4x 34, 0, 3
; 64BIT-NEXT: li 3, 0
-; 64BIT-NEXT: rldimi 3, 3, 32, 0
-; 64BIT-NEXT: mtfprd 0, 3
-; 64BIT-NEXT: li 3, 0
-; 64BIT-NEXT: .p2align 4
+; 64BIT-NEXT: xxspltw 35, 34, 1
+; 64BIT-NEXT: .p2align 5
; 64BIT-NEXT: .LBB0_1: # %while.body
; 64BIT-NEXT: #
-; 64BIT-NEXT: li 4, 0
-; 64BIT-NEXT: rldimi 4, 3, 32, 0
-; 64BIT-NEXT: mtfprd 1, 4
-; 64BIT-NEXT: xxmrghd 35, 1, 0
-; 64BIT-NEXT: vadduwm 3, 2, 3
-; 64BIT-NEXT: xxspltw 36, 35, 1
-; 64BIT-NEXT: vadduwm 3, 3, 4
-; 64BIT-NEXT: xxsldwi 1, 35, 35, 3
-; 64BIT-NEXT: mffprwz 3, 1
+; 64BIT-NEXT: sldi 3, 3, 32
+; 64BIT-NEXT: mtvsrd 36, 3
+; 64BIT-NEXT: vadduwm 4, 2, 4
+; 64BIT-NEXT: vadduwm 4, 4, 3
+; 64BIT-NEXT: xxsldwi 0, 36, 36, 3
+; 64BIT-NEXT: mffprwz 3, 0
; 64BIT-NEXT: b .LBB0_1
entry:
br label %while.body
diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index 91ac7c5ddae3ff..7644619a61aa8a 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -310,8 +310,8 @@ define i32 @sdiv_constant_srai(i32 %a) nounwind {
; RV64-NEXT: lui a1, 419430
; RV64-NEXT: addiw a1, a1, 1639
; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: srli a1, a0, 63
; RV64-NEXT: srai a0, a0, 33
+; RV64-NEXT: srliw a1, a0, 31
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: ret
%1 = sdiv i32 %a, 5
@@ -755,8 +755,9 @@ define i16 @sdiv16_constant_srai(i16 %a) nounwind {
; RV32IM-NEXT: lui a1, 6
; RV32IM-NEXT: addi a1, a1, 1639
; RV32IM-NEXT: mul a0, a0, a1
-; RV32IM-NEXT: srli a1, a0, 31
; RV32IM-NEXT: srai a0, a0, 17
+; RV32IM-NEXT: slli a1, a0, 16
+; RV32IM-NEXT: srli a1, a1, 31
; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
@@ -766,8 +767,9 @@ define i16 @sdiv16_constant_srai(i16 %a) nounwind {
; RV32IMZB-NEXT: lui a1, 6
; RV32IMZB-NEXT: addi a1, a1, 1639
; RV32IMZB-NEXT: mul a0, a0, a1
-; RV32IMZB-NEXT: srli a1, a0, 31
; RV32IMZB-NEXT: srai a0, a0, 17
+; RV32IMZB-NEXT: slli a1, a0, 16
+; RV32IMZB-NEXT: srli a1, a1, 31
; RV32IMZB-NEXT: add a0, a0, a1
; RV32IMZB-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index f4e67698473151..c84fea3518ff45 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -640,8 +640,8 @@ define i32 @sdiv_constant(i32 %a) nounwind {
; RV64IM-NEXT: lui a1, 419430
; RV64IM-NEXT: addiw a1, a1, 1639
; RV64IM-NEXT: mul a0, a0, a1
-; RV64IM-NEXT: srli a1, a0, 63
; RV64IM-NEXT: srai a0, a0, 33
+; RV64IM-NEXT: srliw a1, a0, 31
; RV64IM-NEXT: add a0, a0, a1
; RV64IM-NEXT: ret
%1 = sdiv i32 %a, 5
@@ -1169,8 +1169,9 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
; RV32IM-NEXT: lui a1, 6
; RV32IM-NEXT: addi a1, a1, 1639
; RV32IM-NEXT: mul a0, a0, a1
-; RV32IM-NEXT: srli a1, a0, 31
; RV32IM-NEXT: srai a0, a0, 17
+; RV32IM-NEXT: slli a1, a0, 16
+; RV32IM-NEXT: srli a1, a1, 31
; RV32IM-NEXT: add a0, a0, a1
; RV32IM-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index d309da6df7dc70..8b88defdc169f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -1004,8 +1004,8 @@ define i32 @extractelt_sdiv_v4i32(<4 x i32> %x) {
; RV64M-NEXT: lui a1, 322639
; RV64M-NEXT: addiw a1, a1, -945
; RV64M-NEXT: mul a0, a0, a1
-; RV64M-NEXT: srli a1, a0, 63
; RV64M-NEXT: srai a0, a0, 34
+; RV64M-NEXT: srliw a1, a0, 31
; RV64M-NEXT: add a0, a0, a1
; RV64M-NEXT: ret
%bo = sdiv <4 x i32> %x, <i32 11, i32 12, i32 13, i32 14>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 9c6ec6aef60347..353886da2ab9da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -13487,7 +13487,6 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: vid.v v8
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 4
; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: lw a3, 0(a1)
; RV32ZVE32F-NEXT: sw a3, 252(sp) # 4-byte Folded Spill
; RV32ZVE32F-NEXT: lw a1, 4(a1)
@@ -13587,10 +13586,10 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: lw s9, 4(a1)
; RV32ZVE32F-NEXT: lw s10, 0(a2)
; RV32ZVE32F-NEXT: lw s11, 4(a2)
-; RV32ZVE32F-NEXT: lw t5, 0(a3)
-; RV32ZVE32F-NEXT: lw t6, 4(a3)
-; RV32ZVE32F-NEXT: lw s2, 0(a4)
-; RV32ZVE32F-NEXT: lw s3, 4(a4)
+; RV32ZVE32F-NEXT: lw s4, 0(a3)
+; RV32ZVE32F-NEXT: lw s5, 4(a3)
+; RV32ZVE32F-NEXT: lw s6, 0(a4)
+; RV32ZVE32F-NEXT: lw s7, 4(a4)
; RV32ZVE32F-NEXT: lw a2, 336(sp)
; RV32ZVE32F-NEXT: lw a4, 340(sp)
; RV32ZVE32F-NEXT: lw a5, 344(sp)
@@ -13607,8 +13606,8 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: lw a6, 356(sp)
; RV32ZVE32F-NEXT: lw t3, 360(sp)
; RV32ZVE32F-NEXT: lw t4, 364(sp)
-; RV32ZVE32F-NEXT: lw s4, 0(a5)
-; RV32ZVE32F-NEXT: sw s4, 116(sp) # 4-byte Folded Spill
+; RV32ZVE32F-NEXT: lw t5, 0(a5)
+; RV32ZVE32F-NEXT: sw t5, 116(sp) # 4-byte Folded Spill
; RV32ZVE32F-NEXT: lw a5, 4(a5)
; RV32ZVE32F-NEXT: sw a5, 112(sp) # 4-byte Folded Spill
; RV32ZVE32F-NEXT: lw a5, 0(a6)
@@ -13626,10 +13625,10 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: lw a6, 372(sp)
; RV32ZVE32F-NEXT: lw t3, 376(sp)
; RV32ZVE32F-NEXT: lw t4, 380(sp)
-; RV32ZVE32F-NEXT: lw s4, 0(a5)
-; RV32ZVE32F-NEXT: lw s5, 4(a5)
-; RV32ZVE32F-NEXT: lw s6, 0(a6)
-; RV32ZVE32F-NEXT: lw s7, 4(a6)
+; RV32ZVE32F-NEXT: lw t5, 0(a5)
+; RV32ZVE32F-NEXT: lw t6, 4(a5)
+; RV32ZVE32F-NEXT: lw s2, 0(a6)
+; RV32ZVE32F-NEXT: lw s3, 4(a6)
; RV32ZVE32F-NEXT: lw a5, 0(t3)
; RV32ZVE32F-NEXT: lw a6, 4(t3)
; RV32ZVE32F-NEXT: lw t3, 0(t4)
@@ -13642,10 +13641,10 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: sw t0, 164(a0)
; RV32ZVE32F-NEXT: sw t1, 168(a0)
; RV32ZVE32F-NEXT: sw t2, 172(a0)
-; RV32ZVE32F-NEXT: sw t5, 144(a0)
-; RV32ZVE32F-NEXT: sw t6, 148(a0)
-; RV32ZVE32F-NEXT: sw s2, 152(a0)
-; RV32ZVE32F-NEXT: sw s3, 156(a0)
+; RV32ZVE32F-NEXT: sw s4, 144(a0)
+; RV32ZVE32F-NEXT: sw s5, 148(a0)
+; RV32ZVE32F-NEXT: sw s6, 152(a0)
+; RV32ZVE32F-NEXT: sw s7, 156(a0)
; RV32ZVE32F-NEXT: sw s8, 128(a0)
; RV32ZVE32F-NEXT: sw s9, 132(a0)
; RV32ZVE32F-NEXT: sw s10, 136(a0)
@@ -13686,10 +13685,10 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
; RV32ZVE32F-NEXT: sw a6, 244(a0)
; RV32ZVE32F-NEXT: sw t3, 248(a0)
; RV32ZVE32F-NEXT: sw t4, 252(a0)
-; RV32ZVE32F-NEXT: sw s4, 224(a0)
-; RV32ZVE32F-NEXT: sw s5, 228(a0)
-; RV32ZVE32F-NEXT: sw s6, 232(a0)
-; RV32ZVE32F-NEXT: sw s7, 236(a0)
+; RV32ZVE32F-NEXT: sw t5, 224(a0)
+; RV32ZVE32F-NEXT: sw t6, 228(a0)
+; RV32ZVE32F-NEXT: sw s2, 232(a0)
+; RV32ZVE32F-NEXT: sw s3, 236(a0)
; RV32ZVE32F-NEXT: sw ra, 208(a0)
; RV32ZVE32F-NEXT: lw a1, 108(sp) # 4-byte Folded Reload
; RV32ZVE32F-NEXT: sw a1, 212(a0)
diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 7c291bbceedc6d..49a8ba9a0b2b9a 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -95,8 +95,8 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
; RV64IM-NEXT: lui a2, 253241
; RV64IM-NEXT: addiw a2, a2, -15
; RV64IM-NEXT: mul a1, a1, a2
-; RV64IM-NEXT: srli a2, a1, 63
; RV64IM-NEXT: srai a1, a1, 40
+; RV64IM-NEXT: srliw a2, a1, 31
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, 1060
; RV64IM-NEXT: mul a1, a1, a2
@@ -143,8 +143,8 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
; RV64IM-NEXT: lui a2, 677296
; RV64IM-NEXT: addiw a2, a2, -91
; RV64IM-NEXT: mul a1, a1, a2
-; RV64IM-NEXT: srli a2, a1, 63
; RV64IM-NEXT: srai a1, a1, 40
+; RV64IM-NEXT: srliw a2, a1, 31
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: li a2, -723
; RV64IM-NEXT: mul a1, a1, a2
@@ -194,8 +194,8 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
; RV64IM-NEXT: lui a2, 1036895
; RV64IM-NEXT: addiw a2, a2, 999
; RV64IM-NEXT: mul a1, a1, a2
-; RV64IM-NEXT: srli a2, a1, 63
; RV64IM-NEXT: srai a1, a1, 40
+; RV64IM-NEXT: srliw a2, a1, 31
; RV64IM-NEXT: add a1, a1, a2
; RV64IM-NEXT: lui a2, 1048570
; RV64IM-NEXT: addi a2, a2, 1595
diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index 2a52b9eabc7b48..4efde4b8d75397 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -203,46 +203,39 @@ define i32 @PR43159(ptr %a0) {
; SSE-LABEL: PR43159:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movdqa (%rdi), %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $1, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT: psubd %xmm2, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE-NEXT: paddd %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: psrld $7, %xmm0
-; SSE-NEXT: psrld $6, %xmm2
-; SSE-NEXT: movd %xmm2, %edi
+; SSE-NEXT: pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT: psrld $6, %xmm1
+; SSE-NEXT: movd %xmm1, %edi
; SSE-NEXT: pextrd $1, %xmm0, %esi
-; SSE-NEXT: pextrd $2, %xmm2, %edx
+; SSE-NEXT: pextrd $2, %xmm1, %edx
; SSE-NEXT: pextrd $3, %xmm0, %ecx
; SSE-NEXT: jmp foo # TAILCALL
;
; AVX1-LABEL: PR43159:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1...
[truncated]
Force-pushed e3caee4 to 2032751
Force-pushed 66f97a1 to 4b1aa41
any more comments?
Force-pushed 4b1aa41 to defc669
; RV32IM-NEXT: srai a0, a0, 17
; RV32IM-NEXT: slli a1, a0, 16
this is also 1 -> 2 instructions?
yes, it's the same regression
Force-pushed defc669 to 8c0b396
… down sign bit, try to shift down the MSB directly. If we're only demanding the LSB of a SRL node and that is shifting down an extended sign bit, see if we can change the SRL to shift down the MSB directly. These patterns can occur during legalisation when we've sign extended to a wider type but the SRL is still shifting from the subreg. There's potentially a more general fold we could do here if we're just shifting a block of sign bits, but we only seem to currently benefit from demanding just the MSB, as this is a pretty common pattern for other folds. Fixes the remaining regression in llvm#112588
…t extraction. If we're masking the LSB of a SRL node result and that is shifting down an extended sign bit, see if we can change the SRL to shift down the MSB directly. These patterns can occur during legalisation when we've sign extended to a wider type but the SRL is still shifting from the subreg. Alternative to llvm#114967. Fixes the remaining regression in llvm#112588
…t extraction (#114992). If we're masking the LSB of a SRL node result and that is shifting down an extended sign bit, see if we can change the SRL to shift down the MSB directly. These patterns can occur during legalisation when we've sign extended to a wider type but the SRL is still shifting from the subreg. Alternative to #114967. Fixes the remaining regression in #112588
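To illustrate the identity these SRL commits rely on (a sketch only, not code taken from the patches): once a value has been sign-extended from 32 to 64 bits, bits 63 down to 31 are all copies of the original bit 31, so extracting the LSB of a right shift by 63 gives the same result as a right shift by 31, which a 32-bit shift such as RISC-V's srliw can produce directly.

```cpp
// Standalone check of the bit identity behind the SRL fold: for a value
// sign-extended from i32 to i64, (wide >> 63) & 1 == (wide >> 31) & 1,
// because bits [63:31] are all copies of the original bit 31.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t tests[] = {-5, -1, 0, 1, 7, INT32_MIN, INT32_MAX};
  for (int32_t v : tests) {
    int64_t wide = static_cast<int64_t>(v); // sign-extend i32 -> i64
    uint64_t viaBit63 = (static_cast<uint64_t>(wide) >> 63) & 1; // extended sign bit
    uint64_t viaBit31 = (static_cast<uint64_t>(wide) >> 31) & 1; // original MSB
    assert(viaBit63 == viaBit31);
  }
  return 0;
}
```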
…erand is zero. The dpbusd_const.ll test change is due to us losing the expanded add reduction pattern, as one of the elements is known to be zero (removing one of the adds from the reduction pyramid). I don't think it's of concern. Noticed while working on llvm#107423
Force-pushed 8c0b396 to 8a20acd
@@ -797,6 +797,16 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
return Op.getOperand(1);
break;
}
case ISD::ADD: { |
Should this also cover OR and SUB?
OR is already handled more generally - I'll see if I can get (SUB X, 0) to match in any tests
…t extraction (llvm#114992). If we're masking the LSB of a SRL node result and that is shifting down an extended sign bit, see if we can change the SRL to shift down the MSB directly. These patterns can occur during legalisation when we've sign extended to a wider type but the SRL is still shifting from the subreg. Alternative to llvm#114967. Fixes the remaining regression in llvm#112588
The dpbusd_const.ll test change is due to us losing the expanded add reduction pattern, as one of the elements is known to be zero (removing one of the adds from the reduction pyramid). I don't think it's of concern.
Noticed while working on #107423
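As a rough, self-contained sketch of the reasoning behind the new ISD::ADD case (the names below are invented for illustration and are not LLVM API): if every bit of one addend is known to be zero, the addition cannot change the other operand, so the add can be looked through and the other operand used directly.

```cpp
// Toy model of the fold: each operand carries a "known zero" mask
// (bit set => that bit is known to be 0). If *all* bits of one operand
// are known zero, the add contributes nothing and can be bypassed.
#include <cassert>
#include <cstdint>
#include <optional>

struct KnownBitsModel {
  uint64_t Zero; // bits known to be zero
};

// Returns the index (0 or 1) of the operand that can stand in for the
// add, or nullopt if neither operand is known to be entirely zero.
std::optional<unsigned> bypassAdd(KnownBitsModel LHS, KnownBitsModel RHS) {
  if (~RHS.Zero == 0) // RHS is all zero: add == LHS
    return 0u;
  if (~LHS.Zero == 0) // LHS is all zero: add == RHS
    return 1u;
  return std::nullopt;
}

int main() {
  KnownBitsModel unknown{0};     // nothing known about the value
  KnownBitsModel allZero{~0ULL}; // every bit known to be zero
  assert(bypassAdd(unknown, allZero) == 0u); // keep the LHS
  assert(bypassAdd(allZero, unknown) == 1u); // keep the RHS
  assert(!bypassAdd(unknown, unknown));      // no simplification
  return 0;
}
```

In the patch itself this check is done with DAG.computeKnownBits on each operand, returning the other operand when the result's isZero() holds.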