
Commit 2db190f

[mlir][tensor][NFC] Move function comments to where they are declared. (#94002)
According to the LLVM style guide, we prefer to put documentation comments for public APIs in the header file. See https://llvm.org/docs/CodingStandards.html#doxygen-use-in-documentation-comments for more details.
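For context, a minimal sketch of the convention this commit applies. The function and file names below are hypothetical and not part of this change; only the placement of the Doxygen comment is the point:

// ExampleUtils.h: the documentation comment lives with the public declaration.
/// Clamps `index` into the half-open range [0, size).
int64_t clampIndex(int64_t index, int64_t size);

// ExampleUtils.cpp: the definition does not repeat the documentation comment.
int64_t clampIndex(int64_t index, int64_t size) {
  if (index < 0)
    return 0;
  return index < size ? index : size - 1;
}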
1 parent 8578b60 commit 2db190f

2 files changed: 14 additions, 12 deletions


mlir/include/mlir/Dialect/Tensor/Utils/Utils.h

Lines changed: 13 additions & 0 deletions
@@ -32,9 +32,22 @@ FailureOr<RankedTensorType>
 computeTransposedType(RankedTensorType rankedTensorType,
                       ArrayRef<int64_t> transposeVector);
 
+/// Shell function to compute the Destination Permutation of PackOp
+/// This function uses the helper function `computePackUnPackPerm` to get
+/// the permutation vector. Only major difference between UnPack and Pack is
+/// that packOp uses destination rank whereas unpack Uses source rank.
 SmallVector<int64_t> getPackInverseDestPerm(tensor::PackOp packOp);
+
+/// Shell function to compute the Source Permutation of unPackOp.
+/// This function, like the getPackInverseDestPerm uses the helper function
+/// computePackUnPackPerm` to get the permutation vector.
+/// Only major difference between UnPack and Pack is that packOp uses
+/// destination rank whereas unpack Uses source rank.
 SmallVector<int64_t> getUnPackInverseSrcPerm(tensor::UnPackOp unpackOp);
 
+/// Shell function to compute the Source rank permutation for unpackOp
+/// Unpack requires some packing metadata data information, so created
+/// another function where this value is passed by reference.
 SmallVector<int64_t> getUnPackInverseSrcPerm(tensor::UnPackOp,
                                              PackingMetadata &metadata);

mlir/lib/Dialect/Tensor/Utils/Utils.cpp

Lines changed: 1 addition & 12 deletions
@@ -72,6 +72,7 @@ mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
       RTTBuilder(rankedTensorType).setShape(transposedShape);
   return transposedTensorType;
 }
+
 /// The permutation can be obtained from two permutations:
 /// a) Compute the permutation vector to move the last `numPackedDims` into
 ///    the `innerPosDims` of a shape of rank `rank`.
@@ -100,10 +101,6 @@ computePackUnPackPerm(int64_t rank, ArrayRef<int64_t> &innerDimsPos,
   return packInverseDestPermutation;
 }
 
-/// Shell function to compute the Destination Permutation of PackOp
-/// This function uses the helper function `computePackUnPackPerm` to get
-/// the permutation vector. Only major difference between UnPack and Pack is
-/// that packOp uses destination rank whereas unpack Uses source rank.
 SmallVector<int64_t> mlir::tensor::getPackInverseDestPerm(PackOp packOp) {
 
   PackingMetadata pMetadata;
@@ -115,19 +112,11 @@ SmallVector<int64_t> mlir::tensor::getPackInverseDestPerm(PackOp packOp) {
   return packInvDestPerm;
 }
 
-/// Shell function to compute the Source Permutation of unPackOp.
-/// This function, like the getPackInverseDestPerm uses the helper function
-/// computePackUnPackPerm` to get the permutation vector.
-/// Only major difference between UnPack and Pack is that packOp uses
-/// destination rank whereas unpack Uses source rank.
 SmallVector<int64_t> mlir::tensor::getUnPackInverseSrcPerm(UnPackOp unpackOp) {
   PackingMetadata metadata;
   return mlir::tensor::getUnPackInverseSrcPerm(unpackOp, metadata);
 }
 
-/// Shell function to compute the Source rank permutation for unpackOp
-/// Unpack requires some packing metadata data information, so created
-/// another function where this value is passed by reference.
 SmallVector<int64_t>
 mlir::tensor::getUnPackInverseSrcPerm(UnPackOp unpackOp,
                                       PackingMetadata &metadata) {
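For reference, a minimal usage sketch of the declarations documented above, assuming `packOp` and `unpackOp` are a `tensor::PackOp` and a `tensor::UnPackOp` obtained elsewhere (e.g. inside a rewrite pattern); the surrounding code is illustrative and not part of this commit:

// Inverse destination permutation for a pack op.
SmallVector<int64_t> packDestPerm = mlir::tensor::getPackInverseDestPerm(packOp);

// Inverse source permutation for an unpack op; the overload taking a
// PackingMetadata reference also hands back the packing metadata it computed.
PackingMetadata metadata;
SmallVector<int64_t> unpackSrcPerm =
    mlir::tensor::getUnPackInverseSrcPerm(unpackOp, metadata);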
