[XCore] Set MaxAtomicSizeInBitsSupported to 0 #74389

Merged: 2 commits, Dec 5, 2023

95 changes: 5 additions & 90 deletions llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -147,12 +147,7 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

// Atomic operations
// We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
// As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

// TRAMPOLINE is custom lowered.
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
@@ -171,6 +166,9 @@ XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,

setMinFunctionAlignment(Align(2));
setPrefFunctionAlignment(Align(4));

// This target doesn't implement native atomics.
setMaxAtomicSizeInBitsSupported(0);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
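
Note on the mechanism (not part of the diff): once the supported atomic width is 0, LLVM's AtomicExpand pass rewrites every atomic load, store, RMW, and cmpxchg as a call into the compiler's atomic runtime library, which is what the updated test below checks for. A minimal sketch of the size check involved, using a simplified, hypothetical helper rather than the actual pass code:

```cpp
#include <cstdint>

// Hypothetical simplification of the decision AtomicExpand makes: any atomic
// access wider than the target's supported width is routed to an __atomic_*
// libcall instead of native instructions.
static bool needsAtomicLibcall(uint64_t opSizeInBits,
                               uint64_t maxAtomicSizeInBits) {
  return opSizeInBits > maxAtomicSizeInBits;
}

int main() {
  // With setMaxAtomicSizeInBitsSupported(0), every width takes the libcall path.
  return needsAtomicLibcall(32, /*maxAtomicSizeInBits=*/0) ? 0 : 1;
}
```
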
@@ -215,9 +213,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
case ISD::ATOMIC_LOAD: return LowerATOMIC_LOAD(Op, DAG);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
case ISD::ATOMIC_FENCE:
return LowerATOMIC_FENCE(Op, DAG);
default:
llvm_unreachable("unimplemented operand");
}
@@ -928,88 +925,6 @@ LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
"shouldInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlign() < Align(4))
report_fatal_error("atomic load must be aligned");
return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
N->getChain(), N->getBasePtr(), N->getPointerInfo(),
N->getAlign(), N->getMemOperand()->getFlags(),
N->getAAInfo(), N->getRanges());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlign() < Align(2))
report_fatal_error("atomic load must be aligned");
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
N->getAlign(), N->getMemOperand()->getFlags(),
N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
N->getAlign(), N->getMemOperand()->getFlags(),
N->getAAInfo());
return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
AtomicSDNode *N = cast<AtomicSDNode>(Op);
assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
"shouldInsertFencesForAtomic(true) expects unordered / monotonic");
if (N->getMemoryVT() == MVT::i32) {
if (N->getAlign() < Align(4))
report_fatal_error("atomic store must be aligned");
return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(), N->getBasePtr(),
N->getPointerInfo(), N->getAlign(),
N->getMemOperand()->getFlags(), N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i16) {
if (N->getAlign() < Align(2))
report_fatal_error("atomic store must be aligned");
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i16,
N->getAlign(), N->getMemOperand()->getFlags(),
N->getAAInfo());
}
if (N->getMemoryVT() == MVT::i8)
return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
N->getBasePtr(), N->getPointerInfo(), MVT::i8,
N->getAlign(), N->getMemOperand()->getFlags(),
N->getAAInfo());
return SDValue();
}

MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
// Because of how we convert atomic_load and atomic_store to normal loads and
// stores in the DAG, we need to ensure that the MMOs are marked volatile
// since DAGCombine hasn't been updated to account for atomic, but non
// volatile loads. (See D57601)
if (auto *SI = dyn_cast<StoreInst>(&I))
if (SI->isAtomic())
return MachineMemOperand::MOVolatile;
if (auto *LI = dyn_cast<LoadInst>(&I))
if (LI->isAtomic())
return MachineMemOperand::MOVolatile;
if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
if (AI->isAtomic())
return MachineMemOperand::MOVolatile;
if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
if (AI->isAtomic())
return MachineMemOperand::MOVolatile;
return MachineMemOperand::MONone;
}

//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
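
Aside on the deleted getTargetMMOFlags override: it existed only because the old lowering turned atomic loads and stores into ordinary DAG loads and stores, and had to mark them volatile so DAGCombine would not merge or reorder them (see D57601). With atomics now expanded to libcalls before instruction selection, that workaround is dead. Roughly how such a hook feeds into the MachineMemOperand the DAG builder creates for an IR access; a sketch under simplified assumptions, not the real SelectionDAGBuilder code:

```cpp
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: the hook's result is ORed into the flags of the MMO built for an
// IR load, which is how the removed override kept atomic accesses MOVolatile
// after they had been rewritten as plain loads/stores.
static MachineMemOperand::Flags
mmoFlagsForLoad(const LoadInst &LI, const TargetLowering &TLI) {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= TLI.getTargetMMOFlags(LI); // target extras; MONone by default
  return Flags;
}
```
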
17 changes: 4 additions & 13 deletions llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -181,11 +181,6 @@ namespace llvm {
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;

MachineMemOperand::Flags getTargetMMOFlags(
const Instruction &I) const override;

// Inline asm support
std::pair<unsigned, const TargetRegisterClass *>
@@ -219,14 +214,10 @@ namespace llvm {
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const override;

bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return true;
}
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
};
}

72 changes: 12 additions & 60 deletions llvm/test/CodeGen/XCore/atomic.ll
@@ -21,71 +21,23 @@ define void @atomicloadstore() nounwind {
entry:
; CHECK-LABEL: atomicloadstore

; CHECK: ldw r[[R0:[0-9]+]], dp[pool]
; CHECK-NEXT: ldaw r[[R1:[0-9]+]], dp[pool]
; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: ldc r[[R2:[0-9]+]], 0
%0 = load atomic i32, ptr @pool acquire, align 4
; CHECK: __atomic_load_4
%0 = load atomic i32, ptr @pool seq_cst, align 4

; CHECK-NEXT: ld16s r3, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%1 = load atomic i16, ptr @pool acquire, align 2
; CHECK: __atomic_load_2
%1 = load atomic i16, ptr @pool seq_cst, align 2

; CHECK-NEXT: ld8u r11, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%2 = load atomic i8, ptr @pool acquire, align 1
; CHECK: __atomic_load_1
%2 = load atomic i8, ptr @pool seq_cst, align 1

; CHECK-NEXT: ldw r4, dp[pool]
; CHECK-NEXT: #MEMBARRIER
%3 = load atomic i32, ptr @pool seq_cst, align 4
; CHECK: __atomic_store_4
store atomic i32 %0, ptr @pool seq_cst, align 4

; CHECK-NEXT: ld16s r5, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%4 = load atomic i16, ptr @pool seq_cst, align 2
; CHECK: __atomic_store_2
store atomic i16 %1, ptr @pool seq_cst, align 2

; CHECK-NEXT: ld8u r6, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
%5 = load atomic i8, ptr @pool seq_cst, align 1

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: stw r[[R0]], dp[pool]
store atomic i32 %0, ptr @pool release, align 4

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st16 r3, r[[R1]][r[[R2]]]
store atomic i16 %1, ptr @pool release, align 2

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st8 r11, r[[R1]][r[[R2]]]
store atomic i8 %2, ptr @pool release, align 1

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: stw r4, dp[pool]
; CHECK-NEXT: #MEMBARRIER
store atomic i32 %3, ptr @pool seq_cst, align 4

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st16 r5, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
store atomic i16 %4, ptr @pool seq_cst, align 2

; CHECK-NEXT: #MEMBARRIER
; CHECK-NEXT: st8 r6, r[[R1]][r[[R2]]]
; CHECK-NEXT: #MEMBARRIER
store atomic i8 %5, ptr @pool seq_cst, align 1

; CHECK-NEXT: ldw r[[R0]], dp[pool]
; CHECK-NEXT: stw r[[R0]], dp[pool]
; CHECK-NEXT: ld16s r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: st16 r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: ld8u r[[R0]], r[[R1]][r[[R2]]]
; CHECK-NEXT: st8 r[[R0]], r[[R1]][r[[R2]]]
%6 = load atomic i32, ptr @pool monotonic, align 4
store atomic i32 %6, ptr @pool monotonic, align 4
%7 = load atomic i16, ptr @pool monotonic, align 2
store atomic i16 %7, ptr @pool monotonic, align 2
%8 = load atomic i8, ptr @pool monotonic, align 1
store atomic i8 %8, ptr @pool monotonic, align 1
; CHECK: __atomic_store_1
store atomic i8 %2, ptr @pool seq_cst, align 1

ret void
}
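
For reference, the runtime entry points the new CHECK lines expect are the sized helpers from the compiler's atomic support library (compiler-rt or libatomic). Their shapes, per the documented GCC atomic-library ABI; exact pointer types vary slightly between implementations:

```cpp
#include <cstdint>

// Sized atomic-library routines matched by the updated checks. The trailing
// int is the C11 memory-order value (seq_cst here, since the test now uses
// seq_cst loads and stores).
extern "C" {
uint8_t  __atomic_load_1(const volatile void *ptr, int memorder);
uint16_t __atomic_load_2(const volatile void *ptr, int memorder);
uint32_t __atomic_load_4(const volatile void *ptr, int memorder);
void __atomic_store_1(volatile void *ptr, uint8_t val, int memorder);
void __atomic_store_2(volatile void *ptr, uint16_t val, int memorder);
void __atomic_store_4(volatile void *ptr, uint32_t val, int memorder);
}
```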