
Commit fbe47bf

[mlir][sparse] remove dead code from utils (#68943)

1 parent: fcb4c05
File tree (3 files changed: 0 additions, 145 deletions)

mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp

mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
Lines changed: 0 additions & 107 deletions

@@ -23,54 +23,6 @@
 using namespace mlir;
 using namespace mlir::sparse_tensor;
 
-/// If the tensor is a sparse constant, generates and returns the pair of
-/// the constants for the coordinates and the values.
-static std::optional<std::pair<Value, Value>>
-genSplitSparseConstant(OpBuilder &builder, Location loc, Value tensor) {
-  if (auto constOp = tensor.getDefiningOp<arith::ConstantOp>()) {
-    if (auto a = dyn_cast<SparseElementsAttr>(constOp.getValue())) {
-      auto coordinates = builder.create<arith::ConstantOp>(loc, a.getIndices());
-      auto values = builder.create<arith::ConstantOp>(loc, a.getValues());
-      return std::make_pair(coordinates, values);
-    }
-  }
-  return {};
-}
-
-/// Reads `coordinates[k][0..rank-1]` and `value[k]`, appending the
-/// former onto `cvs` and returning the latter.
-// FIXME: Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`,
-// to clarify its intended meaning.
-static Value genCoordsAndValueForSparse(OpBuilder &builder, Location loc,
-                                        Value coordinates, Value values,
-                                        SmallVectorImpl<Value> &cvs, Value k,
-                                        unsigned rank) {
-  for (unsigned d = 0; d < rank; d++) {
-    Value dim = constantIndex(builder, loc, d);
-    Value crd =
-        builder.create<tensor::ExtractOp>(loc, coordinates, ValueRange{k, dim});
-    crd = builder.create<arith::IndexCastOp>(loc, builder.getIndexType(), crd);
-    // builder.create<memref::StoreOp>(loc, crd, cvs, dim);
-    cvs.push_back(crd);
-  }
-  return builder.create<tensor::ExtractOp>(loc, values, k);
-}
-
-/// Generates code to read the value from `tensor[ivs]`, and open
-/// a conditional for whether the value is non-zero. The generated code
-/// looks like the following and the insertion point after this routine
-/// is inside the then-branch.
-///    if (tensor[ivs] != 0)
-///      insert_point
-static Value genCoordsAndValueForDense(OpBuilder &builder, Location loc,
-                                       Value tensor,
-                                       SmallVectorImpl<Value> &cvs,
-                                       ValueRange ivs) {
-  Value val = genValueForDense(builder, loc, tensor, ivs);
-  cvs.append(ivs.begin(), ivs.end());
-  return val;
-}
-
 //===----------------------------------------------------------------------===//
 // ExecutionEngine/SparseTensorUtils helper functions.
 //===----------------------------------------------------------------------===//
@@ -450,65 +402,6 @@ void mlir::sparse_tensor::deallocDenseTensor(OpBuilder &builder, Location loc,
   builder.create<memref::DeallocOp>(loc, buffer);
 }
 
-Value mlir::sparse_tensor::genValueForDense(OpBuilder &builder, Location loc,
-                                            Value tensor, ValueRange ivs) {
-  Value val = builder.create<tensor::ExtractOp>(loc, tensor, ivs);
-  Value cond = genIsNonzero(builder, loc, val);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else*/ false);
-  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
-  return val;
-}
-
-// FIXME:
-// 1. Dense tensors loop should be generated by loop emitter.
-// 2. Support reduction variables to propagate SSA chains properly.
-// 3. Change the `rank` argument to `Dimension dimRank` or `Level lvlRank`,
-//    to clarify its meaning.
-void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
-    OpBuilder &builder, Location loc, Value src, unsigned rank,
-    function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder) {
-  // `cvs` is actually the flattened coordinates array for all elements,
-  // not just for one element (since we do not `SmallVector::clear` after
-  // each iteration of the body of the loopnest).
-  SmallVector<Value> cvs;
-  SmallVector<Value> lo;
-  SmallVector<Value> hi;
-  SmallVector<Value> st;
-  const Value zero = constantIndex(builder, loc, 0);
-  const Value one = constantIndex(builder, loc, 1);
-  const auto splitSrc = genSplitSparseConstant(builder, loc, src);
-  if (splitSrc.has_value()) {
-    const Value srcCoordinates = splitSrc->first;
-    const Value srcValues = splitSrc->second;
-    lo.push_back(zero);
-    hi.push_back(linalg::createOrFoldDimOp(builder, loc, srcValues, 0));
-    st.push_back(one);
-    scf::buildLoopNest(builder, loc, lo, hi, st, {},
-                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
-                           ValueRange /*args*/) -> scf::ValueVector {
-                         Value val = genCoordsAndValueForSparse(
-                             builder, loc, srcCoordinates, srcValues, cvs,
-                             ivs[0], rank);
-                         bodyBuilder(builder, loc, val, cvs);
-                         return {};
-                       });
-  } else {
-    for (unsigned i = 0; i < rank; i++) {
-      lo.push_back(zero);
-      hi.push_back(linalg::createOrFoldDimOp(builder, loc, src, i));
-      st.push_back(one);
-    }
-    scf::buildLoopNest(builder, loc, lo, hi, st, {},
-                       [&](OpBuilder &builder, Location loc, ValueRange ivs,
-                           ValueRange /*args*/) -> scf::ValueVector {
-                         Value val = genCoordsAndValueForDense(builder, loc,
-                                                               src, cvs, ivs);
-                         bodyBuilder(builder, loc, val, cvs);
-                         return {};
-                       });
-  }
-}
-
 void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
                                        SmallVectorImpl<Value> &sizes,
                                        Location loc, Value src) {
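
For context, here is a minimal, purely hypothetical sketch of a call site for the removed genDenseTensorOrSparseConstantIterLoop, based only on the signature deleted above; the wrapper name walkNonzeros and the commentary inside the body are assumptions for illustration, not code from this commit.

  // Hypothetical caller (not part of this commit): the callback builds the
  // loop body that, at runtime, executes once per stored/nonzero element of
  // `src`, whether `src` is a dense tensor value or a sparse constant.
  static void walkNonzeros(OpBuilder &builder, Location loc, Value src,
                           unsigned dimRank) {
    genDenseTensorOrSparseConstantIterLoop(
        builder, loc, src, dimRank,
        [&](OpBuilder &b, Location l, Value val, ValueRange coords) {
          // `coords` holds the index-typed coordinates gathered into `cvs`
          // above and `val` the element value; a real caller would emit the
          // insertion into the destination sparse tensor here (e.g. the
          // runtime "add element" call mentioned in the TODO removed from
          // SparseTensorConversion.cpp below).
        });
  }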

mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
Lines changed: 0 additions & 28 deletions

@@ -221,34 +221,6 @@ Value allocDenseTensor(OpBuilder &builder, Location loc,
 /// Generates code to deallocate a dense buffer.
 void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer);
 
-/// Generates code to read the value from `tensor[ivs]`. The generated code
-/// looks like the following and the insertion point after this routine is
-/// inside the then-branch.
-///    if (tensor[ivs] != 0)
-///      insert_point
-Value genValueForDense(OpBuilder &builder, Location loc, Value tensor,
-                       ValueRange ivs);
-
-/// Generates the loop structure to iterate over a dense tensor or a sparse
-/// tensor constant to support the lowering of dense-to-sparse convert operator.
-//
-// The loop to iterate a dense tensor:
-//   for i1 in dim1
-//    ..
-//     for ik in dimk
-//       val = a[i1,..,ik]
-//       if val != 0
-//         loop-body
-//
-// The loop to iterate a sparse tensor constant:
-//   for i in range(NNZ)
-//     val = values[i]
-//     [i1,..,ik] = coordinates[i]
-//     loop-body
-void genDenseTensorOrSparseConstantIterLoop(
-    OpBuilder &builder, Location loc, Value src, unsigned rank,
-    function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);
-
 /// Populates given sizes array from dense tensor or sparse tensor constant.
 void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                   Location loc, Value src);
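
To make the removed documentation concrete, here is an illustrative worked example of the sparse-constant path; the specific attribute, shape, and values are invented for the example.

  // Illustrative only: for a sparse constant such as
  //   %src = arith.constant sparse<[[0, 2], [1, 3]], [5.0, 7.0]> : tensor<2x4xf64>
  // genSplitSparseConstant splits the attribute into two dense constants,
  //   coordinates = [[0, 2], [1, 3]]   and   values = [5.0, 7.0],
  // and the "for i in range(NNZ)" loop described above visits
  //   i = 0: [i1, i2] = [0, 2], val = 5.0
  //   i = 1: [i1, i2] = [1, 3], val = 7.0
  // before running the loop body (e.g. inserting the element into the result
  // of a dense-to-sparse convert).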

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
Lines changed: 0 additions & 10 deletions

@@ -241,16 +241,6 @@ class NewCallParams final {
     return true;
   }
 
-  /// Gets the dimension-to-level mapping.
-  //
-  // TODO: This is only ever used for passing into `genAddEltCall`;
-  // is there a better way to encapsulate that pattern (both to avoid
-  // this one-off getter, and to avoid potential mixups)?
-  Value getDimToLvl() const {
-    assert(isInitialized() && "Must initialize before getDimToLvl");
-    return params[kParamDim2Lvl];
-  }
-
   /// Generates a function call, with the current static parameters
   /// and the given dynamic arguments.
   Value genNewCall(Action action, Value ptr = Value()) {
