[mlir][sparse] remove dead code from utils #68943

Merged: 1 commit, Oct 13, 2023
107 changes: 0 additions & 107 deletions mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -23,54 +23,6 @@
using namespace mlir;
using namespace mlir::sparse_tensor;

/// If the tensor is a sparse constant, generates and returns the pair of
/// the constants for the coordinates and the values.
static std::optional<std::pair<Value, Value>>
genSplitSparseConstant(OpBuilder &builder, Location loc, Value tensor) {
  if (auto constOp = tensor.getDefiningOp<arith::ConstantOp>()) {
    if (auto a = dyn_cast<SparseElementsAttr>(constOp.getValue())) {
      auto coordinates = builder.create<arith::ConstantOp>(loc, a.getIndices());
      auto values = builder.create<arith::ConstantOp>(loc, a.getValues());
      return std::make_pair(coordinates, values);
    }
  }
  return {};
}
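
For context only, not part of this patch: a hedged MLIR sketch of the split this removed helper performed, with made-up shapes, element values, and SSA names (%src, %coordinates, %values are assumptions). A sparse constant is split into a dense coordinates constant and a dense values constant:

  %src = arith.constant sparse<[[0, 0], [1, 2]], [1.0, 5.0]> : tensor<3x4xf64>
  // is split into:
  %coordinates = arith.constant dense<[[0, 0], [1, 2]]> : tensor<2x2xi64>
  %values = arith.constant dense<[1.0, 5.0]> : tensor<2xf64>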

/// Reads `coordinates[k][0..rank-1]` and `values[k]`, appending the
/// former onto `cvs` and returning the latter.
// FIXME: Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`,
// to clarify its intended meaning.
static Value genCoordsAndValueForSparse(OpBuilder &builder, Location loc,
                                        Value coordinates, Value values,
                                        SmallVectorImpl<Value> &cvs, Value k,
                                        unsigned rank) {
  for (unsigned d = 0; d < rank; d++) {
    Value dim = constantIndex(builder, loc, d);
    Value crd =
        builder.create<tensor::ExtractOp>(loc, coordinates, ValueRange{k, dim});
    crd = builder.create<arith::IndexCastOp>(loc, builder.getIndexType(), crd);
    // builder.create<memref::StoreOp>(loc, crd, cvs, dim);
    cvs.push_back(crd);
  }
  return builder.create<tensor::ExtractOp>(loc, values, k);
}

/// Generates code to read the value from `tensor[ivs]`, and opens
/// a conditional for whether the value is non-zero. The generated code
/// looks like the following and the insertion point after this routine
/// is inside the then-branch.
/// if (tensor[ivs] != 0)
/// insert_point
static Value genCoordsAndValueForDense(OpBuilder &builder, Location loc,
                                       Value tensor,
                                       SmallVectorImpl<Value> &cvs,
                                       ValueRange ivs) {
  Value val = genValueForDense(builder, loc, tensor, ivs);
  cvs.append(ivs.begin(), ivs.end());
  return val;
}

//===----------------------------------------------------------------------===//
// ExecutionEngine/SparseTensorUtils helper functions.
//===----------------------------------------------------------------------===//
@@ -450,65 +402,6 @@ void mlir::sparse_tensor::deallocDenseTensor(OpBuilder &builder, Location loc,
  builder.create<memref::DeallocOp>(loc, buffer);
}

Value mlir::sparse_tensor::genValueForDense(OpBuilder &builder, Location loc,
                                            Value tensor, ValueRange ivs) {
  Value val = builder.create<tensor::ExtractOp>(loc, tensor, ivs);
  Value cond = genIsNonzero(builder, loc, val);
  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else*/ false);
  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
  return val;
}
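
For context only, not part of this patch: a hedged sketch of the guard this removed helper emitted, assuming a dynamically shaped f64 tensor, 2-D indices, and assumed SSA names (%t, %i, %j):

  %val = tensor.extract %t[%i, %j] : tensor<?x?xf64>
  %zero = arith.constant 0.0 : f64
  %nz = arith.cmpf une, %val, %zero : f64
  scf.if %nz {
    // the insertion point is left here, inside the then-branch
  }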

// FIXME:
// 1. Dense tensors loop should be generated by loop emitter.
// 2. Support reduction variables to propagate SSA chains properly.
// 3. Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`,
// to clarify its meaning.
void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
    OpBuilder &builder, Location loc, Value src, unsigned rank,
    function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder) {
  // `cvs` is actually the flattened coordinates array for all elements,
  // not just for one element (since we do not `SmallVector::clear` after
  // each iteration of the body of the loopnest).
  SmallVector<Value> cvs;
  SmallVector<Value> lo;
  SmallVector<Value> hi;
  SmallVector<Value> st;
  const Value zero = constantIndex(builder, loc, 0);
  const Value one = constantIndex(builder, loc, 1);
  const auto splitSrc = genSplitSparseConstant(builder, loc, src);
  if (splitSrc.has_value()) {
    const Value srcCoordinates = splitSrc->first;
    const Value srcValues = splitSrc->second;
    lo.push_back(zero);
    hi.push_back(linalg::createOrFoldDimOp(builder, loc, srcValues, 0));
    st.push_back(one);
    scf::buildLoopNest(builder, loc, lo, hi, st, {},
                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
                           ValueRange /*args*/) -> scf::ValueVector {
                         Value val = genCoordsAndValueForSparse(
                             builder, loc, srcCoordinates, srcValues, cvs,
                             ivs[0], rank);
                         bodyBuilder(builder, loc, val, cvs);
                         return {};
                       });
  } else {
    for (unsigned i = 0; i < rank; i++) {
      lo.push_back(zero);
      hi.push_back(linalg::createOrFoldDimOp(builder, loc, src, i));
      st.push_back(one);
    }
    scf::buildLoopNest(builder, loc, lo, hi, st, {},
                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
                           ValueRange /*args*/) -> scf::ValueVector {
                         Value val = genCoordsAndValueForDense(builder, loc,
                                                               src, cvs, ivs);
                         bodyBuilder(builder, loc, val, cvs);
                         return {};
                       });
  }
}

void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
SmallVectorImpl<Value> &sizes,
Location loc, Value src) {
28 changes: 0 additions & 28 deletions mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -221,34 +221,6 @@ Value allocDenseTensor(OpBuilder &builder, Location loc,
/// Generates code to deallocate a dense buffer.
void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer);

/// Generates code to read the value from `tensor[ivs]`. The generated code
/// looks like the following and the insertion point after this routine is
/// inside the then-branch.
/// if (tensor[ivs] != 0)
/// insert_point
Value genValueForDense(OpBuilder &builder, Location loc, Value tensor,
                       ValueRange ivs);

/// Generates the loop structure to iterate over a dense tensor or a sparse
/// tensor constant to support lowering the dense-to-sparse convert operator.
//
// The loop to iterate a dense tensor:
// for i1 in dim1
// ..
// for ik in dimk
// val = a[i1,..,ik]
// if val != 0
// loop-body
//
// The loop to iterate a sparse tensor constant:
// for i in range(NNZ)
// val = values[i]
// [i1,..,ik] = coordinates[i]
// loop-body
void genDenseTensorOrSparseConstantIterLoop(
    OpBuilder &builder, Location loc, Value src, unsigned rank,
    function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);
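
For context only, not part of this patch: a hedged MLIR sketch of the sparse-constant loop described in the comment above, assuming rank 2, i64 coordinates, f64 values, and assumed SSA names (%coordinates, %values):

  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %nnz = tensor.dim %values, %c0 : tensor<?xf64>
  scf.for %k = %c0 to %nnz step %c1 {
    %crd0 = tensor.extract %coordinates[%k, %c0] : tensor<?x2xi64>
    %i0 = arith.index_cast %crd0 : i64 to index
    %crd1 = tensor.extract %coordinates[%k, %c1] : tensor<?x2xi64>
    %i1 = arith.index_cast %crd1 : i64 to index
    %val = tensor.extract %values[%k] : tensor<?xf64>
    // the bodyBuilder callback is invoked here with %val and [%i0, %i1]
  }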

/// Populates given sizes array from dense tensor or sparse tensor constant.
void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                  Location loc, Value src);
@@ -241,16 +241,6 @@ class NewCallParams final {
    return true;
  }

  /// Gets the dimension-to-level mapping.
  //
  // TODO: This is only ever used for passing into `genAddEltCall`;
  // is there a better way to encapsulate that pattern (both to avoid
  // this one-off getter, and to avoid potential mixups)?
  Value getDimToLvl() const {
    assert(isInitialized() && "Must initialize before getDimToLvl");
    return params[kParamDim2Lvl];
  }

  /// Generates a function call, with the current static parameters
  /// and the given dynamic arguments.
  Value genNewCall(Action action, Value ptr = Value()) {