[mlir][sparse] refactor dim2lvl/lvl2dim lvlsizes setup #72474
Conversation
This change provides access to the individual components of the dim sizes and lvl sizes after each CodegenUtils call. This is step 2 of 3 toward making sparse_tensor.new work for BSR.
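For readers skimming the diff, the essence of the refactor is the new out-parameter on `genMapBuffers` (and the renamed one on `genReader`). Below is a minimal sketch of the updated call pattern, adapted from the converters in the diff; the surrounding setup of `rewriter`, `loc`, `stt`, and the operand adaptor is assumed, and the exact call sites vary per converter.

```c++
// Open the reader; dimSizesValues now receives the per-dimension sizes as
// individual SSA values (constants for static dims, memref loads for
// dynamic ones), alongside the dimSizesBuffer memref.
SmallVector<Value> dimSizesValues;
Value dimSizesBuffer;
Value reader = genReader(rewriter, loc, stt, adaptor.getOperands()[0],
                         dimSizesValues, dimSizesBuffer);

// Set up the mapping buffers; lvlSizesValues is the new out-parameter that
// exposes the per-level sizes as SSA values (unused values are DCE'd).
// The returned Value is the lvlSizes buffer itself.
SmallVector<Value> lvlSizesValues;
Value dim2lvlBuffer;
Value lvl2dimBuffer;
Value lvlSizesBuffer =
    genMapBuffers(rewriter, loc, stt, dimSizesValues, dimSizesBuffer,
                  lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
```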
@llvm/pr-subscribers-mlir

Author: Aart Bik (aartbik)

Changes: This change provides access to the individual components of the dim sizes and lvl sizes after each CodegenUtils call. This is step 2 of 3 toward making sparse_tensor.new work for BSR.

Full diff: https://github.com/llvm/llvm-project/pull/72474.diff (4 files affected)
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index 5c1d4437265cc93..1200b999f9a90ff 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -639,25 +639,20 @@ Value sparse_tensor::createOrFoldSliceStrideOp(OpBuilder &builder, Location loc,
return builder.create<ToSliceStrideOp>(loc, tensor, APInt(64, dim));
}
-void sparse_tensor::fillDimShape(OpBuilder &builder, Location loc,
- SparseTensorType stt,
- SmallVectorImpl<Value> &out) {
- out.clear();
- out.reserve(stt.getDimRank());
- for (const Size sz : stt.getDimShape()) {
- const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
- out.push_back(constantIndex(builder, loc, s));
- }
-}
-
Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
SparseTensorType stt, Value tensor,
- /*out*/ SmallVectorImpl<Value> &dimShapesValues,
+ /*out*/ SmallVectorImpl<Value> &dimSizesValues,
/*out*/ Value &dimSizesBuffer) {
- // Construct the dimShapes buffer. The buffer contains the static size
- // per dimension, or otherwise a zero for a dynamic size.
- fillDimShape(builder, loc, stt, dimShapesValues);
- Value dimShapesBuffer = allocaBuffer(builder, loc, dimShapesValues);
+ // Construct the dimension **shapes** buffer. The buffer contains the static
+ // size per dimension, or otherwise a zero for a dynamic size.
+ Dimension dimRank = stt.getDimRank();
+ dimSizesValues.clear();
+ dimSizesValues.reserve(dimRank);
+ for (const Size sz : stt.getDimShape()) {
+ const auto s = ShapedType::isDynamic(sz) ? 0 : sz;
+ dimSizesValues.push_back(constantIndex(builder, loc, s));
+ }
+ Value dimShapesBuffer = allocaBuffer(builder, loc, dimSizesValues);
// Create the `CheckedSparseTensorReader`. This reader performs a
// consistency check on the static sizes, but accepts any size
// of each dimension with a dynamic size.
@@ -679,18 +674,27 @@ Value sparse_tensor::genReader(OpBuilder &builder, Location loc,
createFuncCall(builder, loc, "getSparseTensorReaderDimSizes", memTp,
reader, EmitCInterface::On)
.getResult(0);
+ // Also convert the dim shapes values into dim sizes values, just in case
+ // subsequent clients need the values (DCE will remove unused).
+ for (Dimension d = 0; d < dimRank; d++) {
+ if (stt.isDynamicDim(d))
+ dimSizesValues[d] = builder.create<memref::LoadOp>(
+ loc, dimSizesBuffer, constantIndex(builder, loc, d));
+ }
}
return reader;
}
-Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
- SparseTensorType stt,
- ArrayRef<Value> dimShapesValues,
- Value dimSizesBuffer,
- /*out*/ Value &dim2lvlBuffer,
- /*out*/ Value &lvl2dimBuffer) {
+Value sparse_tensor::genMapBuffers(
+ OpBuilder &builder, Location loc, SparseTensorType stt,
+ ArrayRef<Value> dimSizesValues, Value dimSizesBuffer,
+ /*out*/ SmallVectorImpl<Value> &lvlSizesValues,
+ /*out*/ Value &dim2lvlBuffer,
+ /*out*/ Value &lvl2dimBuffer) {
const Dimension dimRank = stt.getDimRank();
const Level lvlRank = stt.getLvlRank();
+ lvlSizesValues.clear();
+ lvlSizesValues.reserve(lvlRank);
// For an identity mapping, the dim2lvl and lvl2dim mappings are
// identical as are dimSizes and lvlSizes, so buffers are reused
// as much as possible.
@@ -698,10 +702,12 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
assert(dimRank == lvlRank);
SmallVector<Value> iotaValues;
iotaValues.reserve(lvlRank);
- for (Level l = 0; l < lvlRank; l++)
+ for (Level l = 0; l < lvlRank; l++) {
iotaValues.push_back(constantIndex(builder, loc, l));
+ lvlSizesValues.push_back(dimSizesValues[l]);
+ }
dim2lvlBuffer = lvl2dimBuffer = allocaBuffer(builder, loc, iotaValues);
- return dimSizesBuffer;
+ return dimSizesBuffer; // now lvlSizesBuffer
}
// Otherwise, some code needs to be generated to set up the buffers.
// This code deals with permutations as well as non-permutations that
@@ -710,7 +716,6 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
const auto lvlToDim = stt.getLvlToDim();
SmallVector<Value> dim2lvlValues(lvlRank); // for each lvl, expr in dim vars
SmallVector<Value> lvl2dimValues(dimRank); // for each dim, expr in lvl vars
- SmallVector<Value> lvlSizesValues(lvlRank);
// Generate dim2lvl.
assert(lvlRank == dimToLvl.getNumResults());
for (Level l = 0; l < lvlRank; l++) {
@@ -748,17 +753,14 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
// (3) l = d % c : c
Value lvlSz;
if (cm == 0) {
- lvlSz = dimShapesValues[d];
- if (stt.isDynamicDim(d))
- lvlSz = builder.create<memref::LoadOp>(loc, dimSizesBuffer,
- constantIndex(builder, loc, d));
+ lvlSz = dimSizesValues[d];
if (cf != 0)
lvlSz = builder.create<arith::DivUIOp>(loc, lvlSz,
constantIndex(builder, loc, cf));
} else {
lvlSz = constantIndex(builder, loc, cm);
}
- lvlSizesValues[l] = lvlSz;
+ lvlSizesValues.push_back(lvlSz);
}
// Generate lvl2dim.
assert(dimRank == lvlToDim.getNumResults());
@@ -792,5 +794,5 @@ Value sparse_tensor::genMapBuffers(OpBuilder &builder, Location loc,
// Return buffers.
dim2lvlBuffer = allocaBuffer(builder, loc, dim2lvlValues);
lvl2dimBuffer = allocaBuffer(builder, loc, lvl2dimValues);
- return allocaBuffer(builder, loc, lvlSizesValues);
+ return allocaBuffer(builder, loc, lvlSizesValues); // lvlSizesBuffer
}
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index d3b0889b71b514c..0e871d8e10aadf9 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -317,20 +317,16 @@ Value createOrFoldSliceOffsetOp(OpBuilder &builder, Location loc, Value tensor,
Value createOrFoldSliceStrideOp(OpBuilder &builder, Location loc, Value tensor,
Dimension dim);
-/// Populates the array with the dimension-shape of the given
-/// `SparseTensorType`, where dynamic sizes are represented by zero.
-void fillDimShape(OpBuilder &builder, Location loc, SparseTensorType stt,
- SmallVectorImpl<Value> &out);
-
/// Generates code that opens a reader and sets the dimension sizes.
Value genReader(OpBuilder &builder, Location loc, SparseTensorType stt,
Value tensor,
- /*out*/ SmallVectorImpl<Value> &dimShapeValues,
+ /*out*/ SmallVectorImpl<Value> &dimSizesValues,
/*out*/ Value &dimSizesBuffer);
/// Generates code to set up the buffer parameters for a map.
Value genMapBuffers(OpBuilder &builder, Location loc, SparseTensorType stt,
- ArrayRef<Value> dimShapeValues, Value dimSizesBuffer,
+ ArrayRef<Value> dimSizesValues, Value dimSizesBuffer,
+ /*out*/ SmallVectorImpl<Value> &lvlSizesValues,
/*out*/ Value &dim2lvlBuffer,
/*out*/ Value &lvl2dimBuffer);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 888f513be2e4dc7..cfc8eb19918b77e 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -1484,11 +1484,12 @@ struct SparseNewConverter : public OpConversionPattern<NewOp> {
createAllocFields(rewriter, loc, dstTp, dynSizes, /*enableInit=*/false,
fields, nse);
- // Now construct the dim2lvl and lvl2dim buffers.
+ // Now construct the lvl sizes and the dim2lvl/lvl2dim buffers.
+ SmallVector<Value> lvlSizesValues;
Value dim2lvlBuffer;
Value lvl2dimBuffer;
genMapBuffers(rewriter, loc, dstTp, dimShapesValues, dimSizesBuffer,
- dim2lvlBuffer, lvl2dimBuffer);
+ lvlSizesValues, dim2lvlBuffer, lvl2dimBuffer);
// Read the COO tensor data.
MutSparseTensorDescriptor desc(dstTp, fields);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index e629133171e15dc..f8c7aba455c0f11 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -199,9 +199,10 @@ class NewCallParams final {
params[kParamDimSizes] = dimSizesBuffer
? dimSizesBuffer
: allocaBuffer(builder, loc, dimSizesValues);
- params[kParamLvlSizes] =
- genMapBuffers(builder, loc, stt, dimSizesValues, params[kParamDimSizes],
- params[kParamDim2Lvl], params[kParamLvl2Dim]);
+ SmallVector<Value> lvlSizesValues; // unused
+ params[kParamLvlSizes] = genMapBuffers(
+ builder, loc, stt, dimSizesValues, params[kParamDimSizes],
+ lvlSizesValues, params[kParamDim2Lvl], params[kParamLvl2Dim]);
// Secondary and primary types encoding.
const auto enc = stt.getEncoding();
params[kParamPosTp] = constantPosTypeEncoding(builder, loc, enc);
@@ -369,13 +370,13 @@ class SparseTensorNewConverter : public OpConversionPattern<NewOp> {
if (!stt.hasEncoding())
return failure();
// Construct the `reader` opening method calls.
- SmallVector<Value> dimShapesValues;
+ SmallVector<Value> dimSizesValues;
Value dimSizesBuffer;
Value reader = genReader(rewriter, loc, stt, adaptor.getOperands()[0],
- dimShapesValues, dimSizesBuffer);
+ dimSizesValues, dimSizesBuffer);
// Use the `reader` to parse the file.
Value tensor = NewCallParams(rewriter, loc)
- .genBuffers(stt, dimShapesValues, dimSizesBuffer)
+ .genBuffers(stt, dimSizesValues, dimSizesBuffer)
.genNewCall(Action::kFromReader, reader);
// Free the memory for `reader`.
createFuncCall(rewriter, loc, "delSparseTensorReader", {}, {reader},
@@ -402,11 +403,11 @@ class SparseTensorAllocConverter
// Gather all dimension sizes as SSA values.
Location loc = op.getLoc();
const Dimension dimRank = stt.getDimRank();
- SmallVector<Value> dimSizes;
- dimSizes.reserve(dimRank);
+ SmallVector<Value> dimSizesValues;
+ dimSizesValues.reserve(dimRank);
unsigned operandCtr = 0;
for (Dimension d = 0; d < dimRank; d++) {
- dimSizes.push_back(
+ dimSizesValues.push_back(
stt.isDynamicDim(d)
? adaptor.getOperands()[operandCtr++]
: constantIndex(rewriter, loc, op.getStaticSize(d)));
@@ -414,7 +415,7 @@ class SparseTensorAllocConverter
// Generate the call to construct empty tensor. The sizes are
// explicitly defined by the arguments to the alloc operator.
rewriter.replaceOp(op, NewCallParams(rewriter, loc)
- .genBuffers(stt, dimSizes)
+ .genBuffers(stt, dimSizesValues)
.genNewCall(Action::kEmpty));
return success();
}
@@ -433,19 +434,19 @@ class SparseTensorEmptyConverter : public OpConversionPattern<tensor::EmptyOp> {
return failure();
// Gather all dimension sizes as SSA values.
const Dimension dimRank = stt.getDimRank();
- SmallVector<Value> dimSizes;
- dimSizes.reserve(dimRank);
+ SmallVector<Value> dimSizesValues;
+ dimSizesValues.reserve(dimRank);
auto shape = op.getType().getShape();
unsigned operandCtr = 0;
for (Dimension d = 0; d < dimRank; d++) {
- dimSizes.push_back(stt.isDynamicDim(d)
- ? adaptor.getOperands()[operandCtr++]
- : constantIndex(rewriter, loc, shape[d]));
+ dimSizesValues.push_back(stt.isDynamicDim(d)
+ ? adaptor.getOperands()[operandCtr++]
+ : constantIndex(rewriter, loc, shape[d]));
}
// Generate the call to construct empty tensor. The sizes are
// explicitly defined by the arguments to the alloc operator.
rewriter.replaceOp(op, NewCallParams(rewriter, loc)
- .genBuffers(stt, dimSizes)
+ .genBuffers(stt, dimSizesValues)
.genNewCall(Action::kEmpty));
return success();
}
@@ -467,8 +468,8 @@ class SparseTensorReorderCOOConverter
const Value src = adaptor.getInputCoo();
NewCallParams params(rewriter, loc);
- SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, srcTp, src);
- rewriter.replaceOp(op, params.genBuffers(dstTp, dimSizes)
+ SmallVector<Value> dimSizesValues = getDimSizes(rewriter, loc, srcTp, src);
+ rewriter.replaceOp(op, params.genBuffers(dstTp, dimSizesValues)
.genNewCall(Action::kSortCOOInPlace, src));
return success();
@@ -706,14 +707,14 @@ class SparseTensorAssembleConverter : public OpConversionPattern<AssembleOp> {
const Location loc = op->getLoc();
const auto dstTp = getSparseTensorType(op.getResult());
assert(dstTp.hasStaticDimShape());
- SmallVector<Value> dimSizes = getDimSizes(rewriter, loc, dstTp);
+ SmallVector<Value> dimSizesValues = getDimSizes(rewriter, loc, dstTp);
// Use a library method to transfer the external buffers from
// clients to the internal SparseTensorStorage. Since we cannot
// assume clients transfer ownership of the buffers, this method
// will copy all data over into a new SparseTensorStorage.
Value dst =
NewCallParams(rewriter, loc)
- .genBuffers(dstTp.withoutDimToLvl(), dimSizes)
+ .genBuffers(dstTp.withoutDimToLvl(), dimSizesValues)
.genNewCall(Action::kPack,
genLvlPtrsBuffers(rewriter, loc, adaptor.getLevels(),
adaptor.getValues()));
@llvm/pr-subscribers-mlir-sparse

Author: Aart Bik (aartbik)

(Same summary and full diff as the @llvm/pr-subscribers-mlir comment above.)