Skip to content

[RISCV] Support memcmp expansion for vectors #114517

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 64 additions & 3 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14474,17 +14474,81 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
return true;
}

/// Try to map an integer comparison with size > XLEN to vector instructions
/// before type legalization splits it up into chunks.
static SDValue
combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC,
                                const SDLoc &DL, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {
  assert(ISD::isIntEqualitySetCC(CC) && "Bad comparison predicate");

  // The transform requires vector instructions, and creating vector ops is
  // not allowed when the function is marked noimplicitfloat.
  if (!Subtarget.hasVInstructions() ||
      DAG.getMachineFunction().getFunction().hasFnAttribute(
          Attribute::NoImplicitFloat))
    return SDValue();

  // We're looking for an oversized integer equality comparison.
  EVT OpVT = X.getValueType();
  if (OpVT.isScalableVT() || !OpVT.isScalarInteger())
    return SDValue();

  // TODO: Support non-power-of-2 types.
  unsigned OpSize = OpVT.getSizeInBits();
  if (!isPowerOf2_32(OpSize))
    return SDValue();

  // The size should be larger than XLen (otherwise scalar code is already
  // fine) and no larger than the widest fixed-length vector we can form.
  unsigned MaxVecBits =
      Subtarget.getRealMinVLen() * Subtarget.getMaxLMULForFixedLengthVectors();
  if (OpSize <= Subtarget.getXLen() || OpSize > MaxVecBits)
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
  auto IsCheapToVectorize = [](SDValue Op) {
    Op = peekThroughBitcasts(Op);
    return isa<ConstantSDNode>(Op) || Op.getValueType().isVector() ||
           Op.getOpcode() == ISD::LOAD;
  };
  if (!IsCheapToVectorize(X) || !IsCheapToVectorize(Y))
    return SDValue();

  // Compare the operands lanewise as byte vectors, then reduce the i1 lanes:
  //   seteq -> all lanes equal  -> VECREDUCE_AND of the lanewise seteq
  //   setne -> any lane differs -> VECREDUCE_OR of the lanewise setne
  // and finally compare the reduced value against zero.
  unsigned NumElts = OpSize / 8;
  EVT VecVT = MVT::getVectorVT(MVT::i8, NumElts);
  EVT CmpVT = MVT::getVectorVT(MVT::i1, NumElts);

  SDValue LHS = DAG.getBitcast(VecVT, X);
  SDValue RHS = DAG.getBitcast(VecVT, Y);
  SDValue LaneCmp = DAG.getSetCC(DL, CmpVT, LHS, RHS, CC);

  unsigned RedOpc = CC == ISD::SETEQ ? ISD::VECREDUCE_AND : ISD::VECREDUCE_OR;
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue Reduced = DAG.getNode(RedOpc, DL, XLenVT, LaneCmp);
  return DAG.getSetCC(DL, VT, Reduced, DAG.getConstant(0, DL, XLenVT),
                      ISD::SETNE);
}

// Replace (seteq (i64 (and X, 0xffffffff)), C1) with
// (seteq (i64 (sext_inreg (X, i32)), C1')) where C1' is C1 sign extended from
// bit 31. Same for setne. C1' may be cheaper to materialize and the sext_inreg
// can become a sext.w instead of a shift pair.
static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // Looking for an equality compare.
  ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
  // First try mapping an oversized scalar equality compare to vector
  // instructions (combineVectorSizedSetCCEquality). This must happen here,
  // before type legalization splits the wide compare into XLen-sized chunks.
  if (ISD::isIntEqualitySetCC(Cond))
    if (SDValue V = combineVectorSizedSetCCEquality(VT, N0, N1, Cond, dl, DAG,
                                                    Subtarget))
      return V;

  // The (and X, 0xffffffff) fold below only applies to i64 operands on RV64.
  if (OpVT != MVT::i64 || !Subtarget.is64Bit())
    return SDValue();

Expand All @@ -14499,8 +14563,6 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
N0.getConstantOperandVal(1) != UINT64_C(0xffffffff))
return SDValue();

// Looking for an equality compare.
ISD::CondCode Cond = cast<CondCodeSDNode>(N->getOperand(2))->get();
if (!isIntEqualitySetCC(Cond))
return SDValue();

Expand All @@ -14512,7 +14574,6 @@ static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,

const APInt &C1 = N1C->getAPIntValue();

SDLoc dl(N);
// If the constant is larger than 2^32 - 1 it is impossible for both sides
// to be equal.
if (C1.getActiveBits() > 32)
Expand Down
31 changes: 31 additions & 0 deletions llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2491,3 +2491,34 @@ bool RISCVTTIImpl::isProfitableToSinkOperands(
}
return true;
}

RISCVTTIImpl::TTI::MemCmpExpansionOptions
RISCVTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  // FIXME: Vector variants haven't been tested.
  // Overlapping loads are only profitable/safe when misaligned accesses are
  // supported by the subtarget.
  Options.AllowOverlappingLoads =
      (ST->enableUnalignedScalarMem() || ST->enableUnalignedVectorMem());
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // Scalar load sizes in bytes, largest first.
  if (ST->is64Bit())
    Options.LoadSizes = {8, 4, 2, 1};
  else
    Options.LoadSizes = {4, 2, 1};
  // For comparisons against zero we can additionally use vector loads when
  // the V extension is available.
  if (IsZeroCmp && ST->hasVInstructions()) {
    unsigned RealMinVLen = ST->getRealMinVLen();
    // Support Fractional LMULs if the lengths are larger than XLen.
    // TODO: Support non-power-of-2 types.
    // Fractional LMULs (1/8, 1/4, 1/2): iterate from the smallest length so
    // inserting at the front keeps LoadSizes sorted largest-first.
    for (unsigned LMUL = 8; LMUL >= 2; LMUL /= 2) {
      unsigned Len = RealMinVLen / LMUL;
      if (Len > ST->getXLen())
        Options.LoadSizes.insert(Options.LoadSizes.begin(), Len / 8);
    }
    // Whole-register LMULs up to the fixed-length-vector maximum.
    for (unsigned LMUL = 1; LMUL <= ST->getMaxLMULForFixedLengthVectors();
         LMUL *= 2) {
      unsigned Len = RealMinVLen * LMUL;
      if (Len > ST->getXLen())
        Options.LoadSizes.insert(Options.LoadSizes.begin(), Len / 8);
    }
  }
  return Options;
}
3 changes: 3 additions & 0 deletions llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -427,6 +427,9 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {

bool isProfitableToSinkOperands(Instruction *I,
SmallVectorImpl<Use *> &Ops) const;

/// Returns the target's policy for inline memcmp expansion (implemented in
/// RISCVTargetTransformInfo.cpp). Scalar load sizes are always provided;
/// when IsZeroCmp and the V extension is enabled, vector-sized loads are
/// added to the candidate load sizes as well.
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                  bool IsZeroCmp) const;
};

} // end namespace llvm
Expand Down
Loading
Loading