Skip to content

[RISCV] Pack build_vectors into largest available element type #97351

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Jul 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3905,6 +3905,65 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
return SDValue();
}

/// Attempt to halve the length of a build_vector chain by packing pairs of
/// adjacent elements into a scalar of twice the element width.  Each pair
/// costs a handful of scalar ALU ops (two masks, a shift, an or), but the
/// resulting build_vector needs half as many vslide1down steps.  Scalar ops
/// are generally lower latency than the vector slides, and out-of-order
/// cores can execute them in parallel.
static SDValue lowerBuildVectorViaPacking(SDValue Op, SelectionDAG &DAG,
                                          const RISCVSubtarget &Subtarget) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  // TODO: Relax these architectural restrictions, possibly with costing
  // of the actual instructions required.
  if (!Subtarget.hasStdExtZbb() || !Subtarget.hasStdExtZba())
    return SDValue();

  MVT EltVT = VT.getVectorElementType();
  if (!EltVT.isInteger())
    return SDValue();

  // The doubled element must still fit in both a scalar register and a
  // legal vector element, and we need an even element count to pair up.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltBits = EltVT.getSizeInBits();
  if (NumElts % 2 != 0 ||
      EltBits >= std::min(Subtarget.getELen(), Subtarget.getXLen()))
    return SDValue();

  MVT XLenVT = Subtarget.getXLenVT();

  // Combine Lo and Hi into (Hi << EltBits) | Lo within an XLen scalar.
  // Both inputs are assumed already masked to EltBits.  The SDLoc is taken
  // from Hi to bias scheduling of the inserted ops toward the definition of
  // the element, which tends to reduce overall register pressure.
  auto PackPair = [&](SDValue Lo, SDValue Hi) {
    SDLoc PairDL(Hi);
    SDValue ShiftAmt = DAG.getConstant(EltBits, PairDL, XLenVT);
    SDValue Shifted = DAG.getNode(ISD::SHL, PairDL, XLenVT, Hi, ShiftAmt);
    return DAG.getNode(ISD::OR, PairDL, XLenVT, Lo, Shifted);
  };

  SDValue LowBitsMask = DAG.getConstant(
      APInt::getLowBitsSet(XLenVT.getSizeInBits(), EltBits), DL, XLenVT);

  SmallVector<SDValue> PackedOps;
  PackedOps.reserve(NumElts / 2);
  for (unsigned Idx = 0; Idx != NumElts; Idx += 2) {
    SDValue Lo = Op.getOperand(Idx);
    SDValue Hi = Op.getOperand(Idx + 1);
    // Mask each half near its definition (same scheduling bias as above).
    Lo = DAG.getNode(ISD::AND, SDLoc(Lo), XLenVT, Lo, LowBitsMask);
    Hi = DAG.getNode(ISD::AND, SDLoc(Hi), XLenVT, Hi, LowBitsMask);
    PackedOps.push_back(PackPair(Lo, Hi));
  }
  assert(NumElts == PackedOps.size() * 2);

  // Build the half-length vector of double-width elements, then reinterpret
  // it as the requested type.
  MVT WideEltVT = MVT::getIntegerVT(EltBits * 2);
  MVT WideVecVT = MVT::getVectorVT(WideEltVT, NumElts / 2);
  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getBuildVector(WideVecVT, DL, PackedOps));
}

// Convert a vXf16 build_vector to vXi16 with bitcasts.
static SDValue lowerBUILD_VECTORvXf16(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
Expand Down Expand Up @@ -4006,6 +4065,13 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
return convertFromScalableVector(VT, Vec, DAG, Subtarget);
}

// If we're about to resort to vslide1down (or stack usage), pack our
// elements into the widest scalar type we can. This will force a VL/VTYPE
// toggle, but reduces the critical path, the number of vslide1down ops
// required, and possibly enables scalar folds of the values.
if (SDValue Res = lowerBuildVectorViaPacking(Op, DAG, Subtarget))
return Res;

// For m1 vectors, if we have non-undef values in both halves of our vector,
// split the vector into low and high halves, build them separately, then
// use a vselect to combine them. For long vectors, this cuts the critical
Expand Down
Loading
Loading