-
Notifications
You must be signed in to change notification settings - Fork 14.3k
[mlir] Update the return type of getNum{Dynamic|Scalable}Dims
#110472
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[mlir] Update the return type of getNum{Dynamic|Scalable}Dims
#110472
Conversation
Updates the return type of `getNumDynamicDims` from `int64_t` to `size_t`. This is for consistency with other helpers/methods that return "size" and to reduce the number of `static_cast`s in various places.
@llvm/pr-subscribers-mlir-tensor @llvm/pr-subscribers-mlir-ods Author: Andrzej Warzyński (banach-space) Changes: Updates the return type of `getNumDynamicDims` from `int64_t` to `size_t`. Full diff: https://github.com/llvm/llvm-project/pull/110472.diff 4 Files Affected:
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index db38e2e1bce22a..c9dcd546cf67c2 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -166,7 +166,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
/// If this is a ranked type, return the number of dimensions with dynamic
/// size. Otherwise, abort.
- int64_t getNumDynamicDims() const {
+ size_t getNumDynamicDims() const {
return llvm::count_if($_type.getShape(), ::mlir::ShapedType::isDynamic);
}
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 04a8ff30ee946b..f1841b860ff81a 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -249,8 +249,7 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
LogicalResult AllocTensorOp::verify() {
if (getCopy() && !getDynamicSizes().empty())
return emitError("dynamic sizes not needed when copying a tensor");
- if (!getCopy() && getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size())
return emitError("expected ")
<< getType().getNumDynamicDims() << " dynamic sizes";
if (getCopy() && getCopy().getType() != getType())
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index f822c11aeec008..956877497d9338 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2045,8 +2045,7 @@ void WaitOp::getCanonicalizationPatterns(RewritePatternSet &results,
LogicalResult AllocOp::verify() {
auto memRefType = llvm::cast<MemRefType>(getMemref().getType());
- if (static_cast<int64_t>(getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (getDynamicSizes().size() != memRefType.getNumDynamicDims())
return emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 75b9729e63648c..d579a27359dfa0 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -205,8 +205,7 @@ static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
if (!memRefType)
return op.emitOpError("result must be a memref");
- if (static_cast<int64_t>(op.getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (op.getDynamicSizes().size() != memRefType.getNumDynamicDims())
return op.emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
@@ -283,8 +282,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
// Create new memref type (which will have fewer dynamic dimensions).
MemRefType newMemRefType =
MemRefType::Builder(memrefType).setShape(newShapeConstants);
- assert(static_cast<int64_t>(dynamicSizes.size()) ==
- newMemRefType.getNumDynamicDims());
+ assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims());
// Create and insert the alloc op for the new memref.
auto newAlloc = rewriter.create<AllocLikeOp>(
|
@llvm/pr-subscribers-mlir-core Author: Andrzej Warzyński (banach-space) Changes: Updates the return type of `getNumDynamicDims` from `int64_t` to `size_t`. Full diff: https://github.com/llvm/llvm-project/pull/110472.diff 4 Files Affected:
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index db38e2e1bce22a..c9dcd546cf67c2 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -166,7 +166,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
/// If this is a ranked type, return the number of dimensions with dynamic
/// size. Otherwise, abort.
- int64_t getNumDynamicDims() const {
+ size_t getNumDynamicDims() const {
return llvm::count_if($_type.getShape(), ::mlir::ShapedType::isDynamic);
}
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 04a8ff30ee946b..f1841b860ff81a 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -249,8 +249,7 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
LogicalResult AllocTensorOp::verify() {
if (getCopy() && !getDynamicSizes().empty())
return emitError("dynamic sizes not needed when copying a tensor");
- if (!getCopy() && getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size())
return emitError("expected ")
<< getType().getNumDynamicDims() << " dynamic sizes";
if (getCopy() && getCopy().getType() != getType())
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index f822c11aeec008..956877497d9338 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2045,8 +2045,7 @@ void WaitOp::getCanonicalizationPatterns(RewritePatternSet &results,
LogicalResult AllocOp::verify() {
auto memRefType = llvm::cast<MemRefType>(getMemref().getType());
- if (static_cast<int64_t>(getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (getDynamicSizes().size() != memRefType.getNumDynamicDims())
return emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 75b9729e63648c..d579a27359dfa0 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -205,8 +205,7 @@ static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
if (!memRefType)
return op.emitOpError("result must be a memref");
- if (static_cast<int64_t>(op.getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (op.getDynamicSizes().size() != memRefType.getNumDynamicDims())
return op.emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
@@ -283,8 +282,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
// Create new memref type (which will have fewer dynamic dimensions).
MemRefType newMemRefType =
MemRefType::Builder(memrefType).setShape(newShapeConstants);
- assert(static_cast<int64_t>(dynamicSizes.size()) ==
- newMemRefType.getNumDynamicDims());
+ assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims());
// Create and insert the alloc op for the new memref.
auto newAlloc = rewriter.create<AllocLikeOp>(
|
@llvm/pr-subscribers-mlir-bufferization Author: Andrzej Warzyński (banach-space) Changes: Updates the return type of `getNumDynamicDims` from `int64_t` to `size_t`. Full diff: https://github.com/llvm/llvm-project/pull/110472.diff 4 Files Affected:
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index db38e2e1bce22a..c9dcd546cf67c2 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -166,7 +166,7 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
/// If this is a ranked type, return the number of dimensions with dynamic
/// size. Otherwise, abort.
- int64_t getNumDynamicDims() const {
+ size_t getNumDynamicDims() const {
return llvm::count_if($_type.getShape(), ::mlir::ShapedType::isDynamic);
}
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
index 04a8ff30ee946b..f1841b860ff81a 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -249,8 +249,7 @@ AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
LogicalResult AllocTensorOp::verify() {
if (getCopy() && !getDynamicSizes().empty())
return emitError("dynamic sizes not needed when copying a tensor");
- if (!getCopy() && getType().getNumDynamicDims() !=
- static_cast<int64_t>(getDynamicSizes().size()))
+ if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size())
return emitError("expected ")
<< getType().getNumDynamicDims() << " dynamic sizes";
if (getCopy() && getCopy().getType() != getType())
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index f822c11aeec008..956877497d9338 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -2045,8 +2045,7 @@ void WaitOp::getCanonicalizationPatterns(RewritePatternSet &results,
LogicalResult AllocOp::verify() {
auto memRefType = llvm::cast<MemRefType>(getMemref().getType());
- if (static_cast<int64_t>(getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (getDynamicSizes().size() != memRefType.getNumDynamicDims())
return emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 75b9729e63648c..d579a27359dfa0 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -205,8 +205,7 @@ static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
if (!memRefType)
return op.emitOpError("result must be a memref");
- if (static_cast<int64_t>(op.getDynamicSizes().size()) !=
- memRefType.getNumDynamicDims())
+ if (op.getDynamicSizes().size() != memRefType.getNumDynamicDims())
return op.emitOpError("dimension operand count does not equal memref "
"dynamic dimension count");
@@ -283,8 +282,7 @@ struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
// Create new memref type (which will have fewer dynamic dimensions).
MemRefType newMemRefType =
MemRefType::Builder(memrefType).setShape(newShapeConstants);
- assert(static_cast<int64_t>(dynamicSizes.size()) ==
- newMemRefType.getNumDynamicDims());
+ assert(dynamicSizes.size() == newMemRefType.getNumDynamicDims());
// Create and insert the alloc op for the new memref.
auto newAlloc = rewriter.create<AllocLikeOp>(
|
The |
And what about `getNumScalableDims`? |
Thanks for taking a look!
Sending update shortly 👍🏻 |
Update type.getNumDynamicDims in SparseTensor and getNumScalableDims in VectorType
getNumDynamicDims
getNum{Dynamic|Scalable}Dims
Updates the return type of `getNumDynamicDims` and `getNumScalableDims` from `int64_t` to `size_t`. This is for consistency with other helpers/methods that return "size" and to reduce the number of `static_cast`s in various places.