[mlir] Use create instead of createOrFold for ConstantOp as folding has no effect (NFC) #80129
Conversation
Thank you for submitting a Pull Request (PR) to the LLVM Project! This PR will be automatically labeled and the relevant teams will be notified. If you wish to, you can add reviewers by using the "Reviewers" section on this page. If this is not working for you, it is probably because you do not have write permissions for the repository. If you have received no comments on your PR for a week, you can request a review by "ping"ing the PR with a comment. If you have further questions, they may be answered by the LLVM GitHub User Guide. You can also ask questions in a comment on this PR, on the LLVM Discord or on the forums.
@llvm/pr-subscribers-mlir @llvm/pr-subscribers-backend-amdgpu

Author: Hugo Trachino (nujaa)

Changes: This PR aims to clean up confusing uses of builder.createOrFold<ConstantOp>, since folding of constants fails. (A minimal illustrative sketch of the pattern follows the diff below.)

Full diff: https://github.com/llvm/llvm-project/pull/80129.diff

5 Files Affected:
diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
index cc97ee74d48b6..12d2462061dcf 100644
--- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
+++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -38,7 +38,7 @@ static Value createI32Constant(ConversionPatternRewriter &rewriter,
static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc,
bool value) {
Type llvmI1 = rewriter.getI1Type();
- return rewriter.createOrFold<LLVM::ConstantOp>(loc, llvmI1, value);
+ return rewriter.create<LLVM::ConstantOp>(loc, llvmI1, value);
}
namespace {
@@ -163,7 +163,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
Value ptr = memrefDescriptor.alignedPtr(rewriter, loc);
// The stride value is always 0 for raw buffers. This also disables
// swizling.
- Value stride = rewriter.createOrFold<LLVM::ConstantOp>(
+ Value stride = rewriter.create<LLVM::ConstantOp>(
loc, llvmI16, rewriter.getI16IntegerAttr(0));
Value numRecords;
if (memrefType.hasStaticShape()) {
diff --git a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
index c625a302a3970..b51a13ae362e9 100644
--- a/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
+++ b/mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
@@ -89,7 +89,7 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
}
VectorType inType = in.getType().cast<VectorType>();
int64_t numElements = inType.getNumElements();
- Value zero = rewriter.createOrFold<arith::ConstantOp>(
+ Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
Value result =
rewriter.createOrFold<vector::SplatOp>(loc, op.getOut().getType(), zero);
@@ -209,7 +209,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
}
VectorType outType = op.getOut().getType().cast<VectorType>();
int64_t numElements = outType.getNumElements();
- Value zero = rewriter.createOrFold<arith::ConstantOp>(
+ Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
Value result = rewriter.createOrFold<vector::SplatOp>(loc, outType, zero);
if (outType.getShape().empty()) {
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 599bb13190f12..363e6016113b1 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -67,8 +67,8 @@ static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
Value getLaneId(ConversionPatternRewriter &rewriter, Location loc,
const unsigned indexBitwidth) {
auto int32Type = IntegerType::get(rewriter.getContext(), 32);
- Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
- Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+ Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+ Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, int32Type,
ValueRange{minus1, zero});
Value laneId = rewriter.create<ROCDL::MbcntHiOp>(loc, int32Type,
@@ -89,8 +89,8 @@ struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
// followed by: %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo)
Type intTy = IntegerType::get(context, 32);
- Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
- Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+ Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+ Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
Value mbcntLo =
rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero});
Value laneId = rewriter.create<ROCDL::MbcntHiOp>(
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index 06ec53d19b1e9..505d85f211111 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -327,7 +327,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
highValues.reserve(rank);
for (int i = 0; i < rank; i++) {
- Value inputIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i);
+ Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i);
Value lowVal = rewriter.createOrFold<tensor::ExtractOp>(
loc, padding, ValueRange({inputIndex, lowIndex}));
Value highVal = rewriter.createOrFold<tensor::ExtractOp>(
@@ -360,8 +360,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
Location loc = op.getLoc();
int axis = op.getAxis();
- Value axisValue = rewriter.createOrFold<arith::ConstantOp>(
- loc, rewriter.getIndexAttr(axis));
+ Value axisValue =
+ rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(axis));
int64_t rank = resultType.getRank();
SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
index 2108fc591055a..7c8403c9609d8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
@@ -44,8 +44,8 @@ struct DecomposeTensorConcatOp : public OpRewritePattern<ConcatOp> {
return failure();
int64_t dim = concatOp.getDim();
- Value dimValue = rewriter.createOrFold<arith::ConstantOp>(
- loc, rewriter.getIndexAttr(dim));
+ Value dimValue =
+ rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(dim));
int64_t rank = concatOp.getResultType().getRank();
SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
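For illustration only, here is a minimal sketch of the pattern the diff above replaces. It assumes a rewrite pattern where a PatternRewriter `rewriter` and a Location `loc` are in scope; the helper name `materializeZeroIndex` is hypothetical and not part of this PR.

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical helper, shown only to illustrate the change in this PR.
static Value materializeZeroIndex(PatternRewriter &rewriter, Location loc) {
  // Before: createOrFold suggests the constant might be folded away, but
  // arith::ConstantIndexOp has no operands, so there is nothing to fold and
  // an op is materialized either way.
  //   Value zero = rewriter.createOrFold<arith::ConstantIndexOp>(loc, 0);

  // After: plain create has the same behavior and states the intent directly.
  return rewriter.create<arith::ConstantIndexOp>(loc, 0);
}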
Do you mind improving the commit message a bit? What is the problem here? Thanks!
Sure, would you consider "[mlir] Create ConstantOps using create instead of createOrFold as folding ConstantOp has no effect." as the changed MR title?
The branch was force-pushed from 85898ce to 11e1b21.
Title changed to "[mlir] Use create instead of createOrFold for ConstantOp as folding has no effect (NFC)". (Tweaked the title to make it shorter, and tagged NFC.)
@nujaa Congratulations on having your first Pull Request (PR) merged into the LLVM Project! Your changes will be combined with recent changes from other authors, then tested by our build bots. Please check whether problems have been caused by your change specifically, as the builds can include changes from many authors. How to do this, and the rest of the post-merge process, is covered in detail here. If your change does cause a problem, it may be reverted, or you can revert it yourself. If you don't get any reports, no action is required from you. Your changes are working as expected, well done!
Thanks @nujaa, now looks good to me.
[mlir] Use create instead of createOrFold for ConstantOp as folding has no effect (NFC) (llvm#80129): This aims to clean up confusing uses of builder.createOrFold<ConstantOp> since folding of constants fails.