Revert "[SelectionDAG] Make (a & x) | (~a & y) -> (a & (x ^ y)) ^ y
available for all targets"
#143648
Conversation
… availab…" This reverts commit bfb4836.
@llvm/pr-subscribers-backend-amdgpu @llvm/pr-subscribers-backend-x86

Author: Iris Shi (el-ev)

Changes

Reverts llvm/llvm-project#137641, which was causing a crash.

Patch is 144.99 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/143648.diff

18 Files Affected:
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b0da536a3b157..b65e8e06eae62 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -8128,59 +8128,6 @@ static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1,
return SDValue();
}
-static SDValue foldMaskedMergeImpl(SDValue AndL0, SDValue AndR0, SDValue AndL1,
- SDValue AndR1, const SDLoc &DL,
- SelectionDAG &DAG) {
- if (!isBitwiseNot(AndL0, true) || !AndL0->hasOneUse())
- return SDValue();
- SDValue NotOp = AndL0->getOperand(0);
- if (NotOp == AndR1)
- std::swap(AndR1, AndL1);
- if (NotOp != AndL1)
- return SDValue();
-
- EVT VT = AndL1->getValueType(0);
- SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, AndR1, AndR0);
- SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
- SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, AndR0);
- return Xor1;
-}
-
-/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
-/// equivalent `((x ^ y) & m) ^ y)` pattern.
-/// This is typically a better representation for targets without a fused
-/// "and-not" operation.
-static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG,
- const TargetLowering &TLI, const SDLoc &DL) {
- // Note that masked-merge variants using XOR or ADD expressions are
- // normalized to OR by InstCombine so we only check for OR.
- assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
- SDValue N0 = Node->getOperand(0);
- if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
- return SDValue();
- SDValue N1 = Node->getOperand(1);
- if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
- return SDValue();
-
- // If the target supports and-not, don't fold this.
- if (TLI.hasAndNot(SDValue(Node, 0)))
- return SDValue();
-
- SDValue N00 = N0->getOperand(0);
- SDValue N01 = N0->getOperand(1);
- SDValue N10 = N1->getOperand(0);
- SDValue N11 = N1->getOperand(1);
- if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
- return Result;
- if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
- return Result;
- if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
- return Result;
- if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
- return Result;
- return SDValue();
-}
-
SDValue DAGCombiner::visitOR(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -8359,10 +8306,6 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
if (SDValue R = foldLogicTreeOfShifts(N, N0, N1, DAG))
return R;
- if (VT.isScalarInteger() && VT != MVT::i1)
- if (SDValue R = foldMaskedMerge(N, DAG, TLI, DL))
- return R;
-
return SDValue();
}
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 1c59b1e63b7bc..f06246706aaa9 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1283,20 +1283,6 @@ bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
return true;
}
-bool SystemZTargetLowering::hasAndNot(SDValue Y) const {
- EVT VT = Y.getValueType();
-
- // We can use NC(G)RK for types in GPRs ...
- if (VT == MVT::i32 || VT == MVT::i64)
- return Subtarget.hasMiscellaneousExtensions3();
-
- // ... or VNC for types in VRs.
- if (VT.isVector() || VT == MVT::i128)
- return Subtarget.hasVector();
-
- return false;
-}
-
// Information about the addressing mode for a memory access.
struct AddressingMode {
// True if a long displacement is supported.
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index f2f0bf6d8b410..f3536a840fda8 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -671,7 +671,6 @@ class SystemZTargetLowering : public TargetLowering {
}
unsigned getStackProbeSize(const MachineFunction &MF) const;
- bool hasAndNot(SDValue Y) const override;
private:
const SystemZSubtarget &Subtarget;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 68da901c2f123..96be91256915d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52350,6 +52350,59 @@ static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
}
+static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
+ SDValue And1_L, SDValue And1_R,
+ const SDLoc &DL, SelectionDAG &DAG) {
+ if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
+ return SDValue();
+ SDValue NotOp = And0_L->getOperand(0);
+ if (NotOp == And1_R)
+ std::swap(And1_R, And1_L);
+ if (NotOp != And1_L)
+ return SDValue();
+
+ // (~(NotOp) & And0_R) | (NotOp & And1_R)
+ // --> ((And0_R ^ And1_R) & NotOp) ^ And1_R
+ EVT VT = And1_L->getValueType(0);
+ SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
+ SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
+ SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
+ SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
+ return Xor1;
+}
+
+/// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
+/// equivalent `((x ^ y) & m) ^ y)` pattern.
+/// This is typically a better representation for targets without a fused
+/// "and-not" operation. This function is intended to be called from a
+/// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
+static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
+ // Note that masked-merge variants using XOR or ADD expressions are
+ // normalized to OR by InstCombine so we only check for OR.
+ assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
+ SDValue N0 = Node->getOperand(0);
+ if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
+ return SDValue();
+ SDValue N1 = Node->getOperand(1);
+ if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
+ return SDValue();
+
+ SDLoc DL(Node);
+ SDValue N00 = N0->getOperand(0);
+ SDValue N01 = N0->getOperand(1);
+ SDValue N10 = N1->getOperand(0);
+ SDValue N11 = N1->getOperand(1);
+ if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
+ return Result;
+ if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
+ return Result;
+ if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
+ return Result;
+ if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
+ return Result;
+ return SDValue();
+}
+
/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
@@ -52753,6 +52806,11 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
}
}
+ // We should fold "masked merge" patterns when `andn` is not available.
+ if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
+ if (SDValue R = foldMaskedMerge(N, DAG))
+ return R;
+
if (SDValue R = combineOrXorWithSETCC(N->getOpcode(), dl, VT, N0, N1, DAG))
return R;
diff --git a/llvm/test/CodeGen/AMDGPU/bfi_int.ll b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
index b372dec383344..201b97d479c68 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -16,9 +16,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_xor_b32 s1, s1, s2
+; GFX7-NEXT: s_andn2_b32 s2, s2, s0
; GFX7-NEXT: s_and_b32 s0, s1, s0
-; GFX7-NEXT: s_xor_b32 s0, s0, s2
+; GFX7-NEXT: s_or_b32 s0, s2, s0
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
@@ -28,9 +28,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_xor_b32 s1, s1, s2
+; GFX8-NEXT: s_andn2_b32 s2, s2, s0
; GFX8-NEXT: s_and_b32 s0, s1, s0
-; GFX8-NEXT: s_xor_b32 s0, s0, s2
+; GFX8-NEXT: s_or_b32 s0, s2, s0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s0
@@ -44,9 +44,9 @@ define amdgpu_kernel void @s_bfi_def_i32(ptr addrspace(1) %out, i32 %x, i32 %y,
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_xor_b32 s1, s1, s2
+; GFX10-NEXT: s_andn2_b32 s2, s2, s0
; GFX10-NEXT: s_and_b32 s0, s1, s0
-; GFX10-NEXT: s_xor_b32 s0, s0, s2
+; GFX10-NEXT: s_or_b32 s0, s2, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s0
; GFX10-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-NEXT: s_endpgm
@@ -1407,9 +1407,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
-; GFX7-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; GFX7-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX7-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
+; GFX7-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
+; GFX7-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX7-NEXT: s_add_u32 s0, s0, 10
; GFX7-NEXT: s_addc_u32 s1, s1, 0
; GFX7-NEXT: v_mov_b32_e32 v0, s0
@@ -1422,9 +1422,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
-; GFX8-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; GFX8-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
+; GFX8-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
+; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX8-NEXT: s_add_u32 s0, s0, 10
; GFX8-NEXT: s_addc_u32 s1, s1, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
@@ -1438,9 +1438,9 @@ define amdgpu_kernel void @s_bitselect_i64_pat_0(i64 %a, i64 %b, i64 %mask) {
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
-; GFX10-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; GFX10-NEXT: s_xor_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b64 s[2:3], s[0:1], s[2:3]
+; GFX10-NEXT: s_andn2_b64 s[0:1], s[4:5], s[0:1]
+; GFX10-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX10-NEXT: s_add_u32 s0, s0, 10
; GFX10-NEXT: s_addc_u32 s1, s1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
index e1b4cad370f96..6925a98f643b9 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -289,16 +289,16 @@ entry:
define amdgpu_kernel void @half4_inselt(ptr addrspace(1) %out, <4 x half> %vec, i32 %sel) {
; GCN-LABEL: half4_inselt:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_mov_b32 s4, 0x3c003c00
; GCN-NEXT: s_mov_b32 s5, s4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5]
; GCN-NEXT: s_lshl_b32 s6, s6, 4
; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6
-; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1
@@ -317,10 +317,10 @@ define amdgpu_kernel void @half2_inselt(ptr addrspace(1) %out, <2 x half> %vec,
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s3, s3, 4
-; GCN-NEXT: s_xor_b32 s4, s2, 0x3c003c00
; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3
-; GCN-NEXT: s_and_b32 s3, s4, s3
-; GCN-NEXT: s_xor_b32 s2, s3, s2
+; GCN-NEXT: s_andn2_b32 s2, s2, s3
+; GCN-NEXT: s_and_b32 s3, s3, 0x3c003c00
+; GCN-NEXT: s_or_b32 s2, s3, s2
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: v_mov_b32_e32 v2, s2
@@ -399,10 +399,10 @@ define amdgpu_kernel void @short2_inselt(ptr addrspace(1) %out, <2 x i16> %vec,
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s3, s3, 4
-; GCN-NEXT: s_xor_b32 s4, s2, 0x10001
; GCN-NEXT: s_lshl_b32 s3, 0xffff, s3
-; GCN-NEXT: s_and_b32 s3, s4, s3
-; GCN-NEXT: s_xor_b32 s2, s3, s2
+; GCN-NEXT: s_andn2_b32 s2, s2, s3
+; GCN-NEXT: s_and_b32 s3, s3, 0x10001
+; GCN-NEXT: s_or_b32 s2, s3, s2
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: v_mov_b32_e32 v2, s2
@@ -417,16 +417,16 @@ entry:
define amdgpu_kernel void @short4_inselt(ptr addrspace(1) %out, <4 x i16> %vec, i32 %sel) {
; GCN-LABEL: short4_inselt:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_mov_b32 s4, 0x10001
; GCN-NEXT: s_mov_b32 s5, s4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_xor_b64 s[4:5], s[2:3], s[4:5]
; GCN-NEXT: s_lshl_b32 s6, s6, 4
; GCN-NEXT: s_lshl_b64 s[6:7], 0xffff, s6
-; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT: s_and_b64 s[4:5], s[6:7], s[4:5]
+; GCN-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1
@@ -442,15 +442,15 @@ entry:
define amdgpu_kernel void @byte8_inselt(ptr addrspace(1) %out, <8 x i8> %vec, i32 %sel) {
; GCN-LABEL: byte8_inselt:
; GCN: ; %bb.0: ; %entry
-; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_load_dword s6, s[4:5], 0x34
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_xor_b32 s5, s3, 0x1010101
-; GCN-NEXT: s_lshl_b32 s6, s6, 3
-; GCN-NEXT: s_xor_b32 s4, s2, 0x1010101
-; GCN-NEXT: s_lshl_b64 s[6:7], 0xff, s6
-; GCN-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
-; GCN-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GCN-NEXT: s_lshl_b32 s4, s6, 3
+; GCN-NEXT: s_lshl_b64 s[4:5], 0xff, s4
+; GCN-NEXT: s_and_b32 s7, s5, 0x1010101
+; GCN-NEXT: s_and_b32 s6, s4, 0x1010101
+; GCN-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v1, s1
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 44bd4090436ef..be16fac4c53f7 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1511,13 +1511,13 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2
; SI-NEXT: s_mov_b32 s7, 0x100f000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_lshl_b32 s1, s3, 4
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_xor_b32 s0, s2, 0x50005
-; SI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; SI-NEXT: s_and_b32 s0, s0, s1
-; SI-NEXT: s_xor_b32 s0, s0, s2
+; SI-NEXT: s_lshl_b32 s0, s3, 4
+; SI-NEXT: s_lshl_b32 s0, 0xffff, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_andn2_b32 s1, s2, s0
+; SI-NEXT: s_and_b32 s0, s0, 0x50005
+; SI-NEXT: s_or_b32 s0, s0, s1
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -1528,13 +1528,13 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2
; VI-NEXT: s_mov_b32 s7, 0x1100f000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshl_b32 s1, s3, 4
; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_xor_b32 s0, s2, 0x50005
-; VI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; VI-NEXT: s_and_b32 s0, s0, s1
-; VI-NEXT: s_xor_b32 s0, s0, s2
+; VI-NEXT: s_lshl_b32 s0, s3, 4
+; VI-NEXT: s_lshl_b32 s0, 0xffff, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_andn2_b32 s1, s2, s0
+; VI-NEXT: s_and_b32 s0, s0, 0x50005
+; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
@@ -1552,13 +1552,13 @@ define amdgpu_kernel void @dynamic_insertelement_v3i16(ptr addrspace(1) %out, <3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_lshl_b32 s8, s8, 4
+; SI-NEXT: s_lshl_b32 s0, s8, 4
; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_xor_b32 s1, s3, 0x50005
-; SI-NEXT: s_xor_b32 s0, s2, 0x50005
-; SI-NEXT: s_lshl_b64 s[8:9], 0xffff, s8
-; SI-NEXT: s_and_b64 s[0:1], s[0:1], s[8:9]
-; SI-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT: s_lshl_b64 s[0:1], 0xffff, s0
+; SI-NEXT: s_and_b32 s9, s1, 0x50005
+; SI-NEXT: s_and_b32 s8, s0, 0x50005
+; SI-NEXT: s_andn2_b64 s[0:1], s[2:3], s[0:1]
+; SI-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; SI-NEXT: v_mov_b32_e32 v0, s0
@@ -1573,14 +1573,14 @@ define amdgpu_kernel void @dynamic_insertelement_v3i16(ptr addrspace(1) %out, <3
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s0, 0x50005
+; VI-NEXT: s_lshl_b32 s0, s8, 4
+; VI-NEXT: s_mov_b32 s8, 0x50005
; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_mov_b32 s1, s0
-; VI-NEXT: s_lshl_b32 s8, s8, 4
-; VI-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1]
-; VI-NEXT: s_lshl_b64 s[8:9], 0xffff, s8
+; VI-NEXT: s_lshl_b64 s[0:1], 0xffff, s0
+; VI-NEXT: s_mov_b32 s9, s8
+; VI-NEXT: s_andn2_b64 s[2:3], s[2:3], s[0:1]
; VI-NEXT: s_and_b64 s[0:1], s[0:1], s[8:9]
-; VI-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
+; VI-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
; VI-NEXT: v_mov_b32_e32 v0, s0
@@ -1594,34 +1594,35 @@ define amdgpu_kernel void @dynamic_insertelement_v3i16(ptr addrspace(1) %out, <3
define amdgpu_kernel void @dynamic_insertelement_v2i8(ptr addrspace(1) %out, [8 x i32], <2 x i8> %a, [8 x i32], i32 %b) nounwind {
; SI-LABEL: dynamic_insertelement_v2i8:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dword s4, s[8:9], 0xa
-; SI-NEXT: s_load_dword s5, s[8:9], 0x13
+; SI-NEXT: s_load_dword s4, s[8:9], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; SI-NEXT: s_load_dword s5, s[8:9], 0xa
; SI-NEXT: s_mov_b32 s3, 0x100f000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_xor_b32 s6, s4, 0x505
-; SI-NEXT: s_lshl_b32 s5, s5, 3
-; SI-NEXT: s_lshl_b32 s5, 0xff, s5
-; SI-NEXT: s_and_b32 s5, s6, s5
-; SI-NEXT: s_xor_b32 s4, s5, s4
+; SI-NEXT: s_lshl_b32 s4, s4, 3
+; SI-NEXT: s_lshl_b32 s4, 0xff, s4
+; SI-NEXT: s_andn2_b32 s5, s5, s4
+; SI-NEXT: s_and_b32 s4, s4, 0x505
+; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT:...
[truncated]
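Two details distinguish the re-added X86-only copy (the X86ISelLowering.cpp hunk above) from the generic DAGCombiner copy that this patch removes: the X86 version gates the fold on `!Subtarget.hasBMI()` instead of `TLI.hasAndNot(...)`, and it freezes `And0_R` before building the rewritten expression, since that value feeds both XOR nodes and an undef/poison input must not yield two different values at its two uses. A minimal sketch of the node sequence being built, assuming an in-tree LLVM build (the helper name and operand names are illustrative, not the patch's exact code):

```cpp
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Build ((Z ^ Y) & M) ^ Y as a replacement for (~M & Y) | (M & Z).
// Y appears twice in the folded form, so it is frozen first.
static SDValue buildMaskedMergeFold(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                    SDValue M, SDValue Y, SDValue Z) {
  SDValue FrozenY = DAG.getNode(ISD::FREEZE, DL, VT, Y);
  SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, Z, FrozenY);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, M);
  return DAG.getNode(ISD::XOR, DL, VT, And, FrozenY);
}
```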
@llvm/pr-subscribers-backend-systemz

Author: Iris Shi (el-ev)

Changes

Reverts llvm/llvm-project#137641, which was causing a crash.

Patch is 144.99 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/143648.diff
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/162/builds/24286

Here is the relevant piece of the build log for reference:
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/23/builds/11132

Here is the relevant piece of the build log for reference:
Reverts #137641, which was causing a crash.