@@ -1306,7 +1306,7 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
1306
1306
1307
1307
def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator,
1308
1308
ParentOneOf<["BinaryOp", "UnaryOp", "ReduceOp", "SelectOp",
1309
- "ForeachOp", "IterateOp"]>]> {
1309
+ "ForeachOp", "IterateOp", "CoIterateOp" ]>]> {
1310
1310
let summary = "Yield from sparse_tensor set-like operations";
1311
1311
let description = [{
1312
1312
Yields a value from within a `binary`, `unary`, `reduce`,
@@ -1629,14 +1629,14 @@ def IterateOp : SparseTensor_Op<"iterate",
1629
1629
1630
1630
let arguments = (ins AnySparseIterSpace:$iterSpace,
1631
1631
Variadic<AnyType>:$initArgs,
1632
- LevelSetAttr :$crdUsedLvls);
1632
+ I64BitSetAttr :$crdUsedLvls);
1633
1633
let results = (outs Variadic<AnyType>:$results);
1634
1634
let regions = (region SizedRegion<1>:$region);
1635
1635
1636
1636
let skipDefaultBuilders = 1;
1637
1637
let builders = [
1638
1638
OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs)>,
1639
- OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs, "LevelSet " :$crdUsedLvls)>
1639
+ OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs, "I64BitSet " :$crdUsedLvls)>
1640
1640
];
1641
1641
1642
1642
let extraClassDeclaration = [{
@@ -1669,6 +1669,123 @@ def IterateOp : SparseTensor_Op<"iterate",
1669
1669
let hasCustomAssemblyFormat = 1;
1670
1670
}
1671
1671
1672
+ def SparseTensor_CoIterateOp : SparseTensor_Op<"coiterate",
1673
+ [AttrSizedOperandSegments,
1674
+ SingleBlockImplicitTerminator<"sparse_tensor::YieldOp">,
1675
+ RecursiveMemoryEffects]> {
1676
+ let summary = "CoIterates over a set of sparse iteration spaces";
1677
+ let description = [{
1678
+ The `sparse_tensor.coiterate` operation represents a loop (nest) over
1679
+ a set of iteration spaces.
1680
+ The operation can have multiple regions, with each of them defining a
1681
+ case to compute a result at the current iteration. The case condition
1682
+ is defined solely based on the pattern of specified iterators.
1683
+ For example:
1684
+ ```mlir
1685
+ %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1686
+ : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1687
+ !sparse_tensor.iter_space<#COO, lvls = 0>)
1688
+ -> index
1689
+ case %it1, _ {
1690
+ // %coord is specified in space %sp1 but *NOT* specified in space %sp2.
1691
+ }
1692
+ case %it1, %it2 {
1693
+ // %coord is specified in *BOTH* spaces %sp1 and %sp2.
1694
+ }
1695
+ ```
1696
+
1697
+ `sparse_tensor.coiterate` can also operate on loop-carried variables.
1698
+ It returns the final values after loop termination.
1699
+ The initial values of the variables are passed as additional SSA operands
1700
+ to the iterator SSA value and used coordinate SSA values.
1701
+ Each operation region has variadic arguments for specified (used) coordinates, one argument
1702
+ for each loop-carried variable, representing the value of the variable
1703
+ at the current iteration, followed by a list of arguments for iterators.
1704
+ The body region must contain exactly one block that terminates with
1705
+ `sparse_tensor.yield`.
1706
+
1707
+ The results of a `sparse_tensor.coiterate` hold the final values after
1708
+ the last iteration. If the `sparse_tensor.coiterate` defines any values,
1709
+ a yield must be explicitly present in every region defined in the operation.
1710
+ The number and types of the `sparse_tensor.coiterate` results must match
1711
+ the initial values in the iter_args binding and the yield operands.
1712
+
1713
+
1714
+ A `sparse_tensor.coiterate` example that does elementwise addition between two
1715
+ sparse vectors.
1716
+
1717
+
1718
+ ```mlir
1719
+ %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1720
+ : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1721
+ !sparse_tensor.iter_space<#CSR, lvls = 0>)
1722
+ -> tensor<?xindex, #CSR>
1723
+ case %it1, _ {
1724
+ // v = v1 + 0 = v1
1725
+ %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1726
+ %yield = sparse_tensor.insert %v1 into %arg[%coord]
1727
+ sparse_tensor.yield %yield
1728
+ }
1729
+ case _, %it2 {
1730
+ // v = v2 + 0 = v2
1731
+ %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1732
+ %yield = sparse_tensor.insert %v2 into %arg[%coord]
1733
+ sparse_tensor.yield %yield
1734
+ }
1735
+ case %it1, %it2 {
1736
+ // v = v1 + v2
1737
+ %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1738
+ %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1739
+ %v = arith.addi %v1, %v2 : index
1740
+ %yield = sparse_tensor.insert %v into %arg[%coord]
1741
+ sparse_tensor.yield %yield
1742
+ }
1743
+ ```
1744
+ }];
1745
+
1746
+ let arguments = (ins Variadic<AnySparseIterSpace>:$iterSpaces,
1747
+ Variadic<AnyType>:$initArgs,
1748
+ I64BitSetAttr:$crdUsedLvls,
1749
+ I64BitSetArrayAttr:$cases);
1750
+ let results = (outs Variadic<AnyType>:$results);
1751
+ let regions = (region VariadicRegion<SizedRegion<1>>:$caseRegions);
1752
+
1753
+ let extraClassDeclaration = [{
1754
+ unsigned getSpaceDim() {
1755
+ return llvm::cast<::mlir::sparse_tensor::IterSpaceType>(
1756
+ getIterSpaces().front().getType())
1757
+ .getSpaceDim();
1758
+ }
1759
+ I64BitSet getRegionDefinedSpace(unsigned regionIdx) {
1760
+ return I64BitSet(llvm::cast<IntegerAttr>(getCases()[regionIdx])
1761
+ .getValue().getZExtValue());
1762
+ }
1763
+ // The block arguments start with referenced coordinates, followed by
1764
+ // user-provided iteration arguments and ends with iterators.
1765
+ Block::BlockArgListType getCrds(unsigned regionIdx) {
1766
+ return getRegion(regionIdx).getArguments()
1767
+ .take_front(getCrdUsedLvls().count());
1768
+ }
1769
+ unsigned getNumRegionIterArgs(unsigned regionIdx) {
1770
+ return getInitArgs().size();
1771
+ }
1772
+ Block::BlockArgListType getRegionIterArgs(unsigned regionIdx) {
1773
+ return getRegion(regionIdx).getArguments()
1774
+ .slice(getCrdUsedLvls().count(), getNumRegionIterArgs(regionIdx));
1775
+ }
1776
+ Block::BlockArgListType getRegionIterators(unsigned regionIdx) {
1777
+ return getRegion(regionIdx).getArguments()
1778
+ .take_back(getRegionDefinedSpace(regionIdx).count());
1779
+ }
1780
+ }];
1781
+
1782
+ // TODO:
1783
+ // let hasVerifier = 1;
1784
+ // let hasRegionVerifier = 1;
1785
+ // let hasCanonicalizer = 1;
1786
+ let hasCustomAssemblyFormat = 1;
1787
+ }
1788
+
1672
1789
//===----------------------------------------------------------------------===//
1673
1790
// Sparse Tensor Debugging and Test-Only Operations.
1674
1791
//===----------------------------------------------------------------------===//
0 commit comments