@@ -1306,7 +1306,7 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
1306
1306
1307
1307
def SparseTensor_YieldOp : SparseTensor_Op<"yield", [Pure, Terminator,
1308
1308
ParentOneOf<["BinaryOp", "UnaryOp", "ReduceOp", "SelectOp",
1309
- "ForeachOp", "IterateOp"]>]> {
1309
+ "ForeachOp", "IterateOp", "CoIterateOp" ]>]> {
1310
1310
let summary = "Yield from sparse_tensor set-like operations";
1311
1311
let description = [{
1312
1312
Yields a value from within a `binary`, `unary`, `reduce`,
@@ -1604,14 +1604,14 @@ def IterateOp : SparseTensor_Op<"iterate",
1604
1604
1605
1605
let arguments = (ins AnySparseIterSpace:$iterSpace,
1606
1606
Variadic<AnyType>:$initArgs,
1607
- LevelSetAttr :$crdUsedLvls);
1607
+ I64BitSetAttr :$crdUsedLvls);
1608
1608
let results = (outs Variadic<AnyType>:$results);
1609
1609
let regions = (region SizedRegion<1>:$region);
1610
1610
1611
1611
let skipDefaultBuilders = 1;
1612
1612
let builders = [
1613
1613
OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs)>,
1614
- OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs, "LevelSet " :$crdUsedLvls)>
1614
+ OpBuilder<(ins "Value":$iterSpace, "ValueRange":$initArgs, "I64BitSet " :$crdUsedLvls)>
1615
1615
];
1616
1616
1617
1617
let extraClassDeclaration = [{
@@ -1644,6 +1644,123 @@ def IterateOp : SparseTensor_Op<"iterate",
1644
1644
let hasCustomAssemblyFormat = 1;
1645
1645
}
1646
1646
1647
+ def SparseTensor_CoIterateOp : SparseTensor_Op<"coiterate",
1648
+ [AttrSizedOperandSegments,
1649
+ SingleBlockImplicitTerminator<"sparse_tensor::YieldOp">,
1650
+ RecursiveMemoryEffects]> {
1651
+ let summary = "CoIterates over a set of sparse iteration spaces";
1652
+ let description = [{
1653
+ The `sparse_tensor.coiterate` operation represents a loop (nest) over
1654
+ a set of iteration spaces.
1655
+ The operation can have multiple regions, with each of them defining a
1656
+ case to compute a result at the current iteration. The case condition
1657
+ is defined solely based on the pattern of specified iterators.
1658
+ For example:
1659
+ ```mlir
1660
+ %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1661
+ : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1662
+ !sparse_tensor.iter_space<#COO, lvls = 0>)
1663
+ -> index
1664
+ case %it1, _ {
1665
+ // %coord is specified in space %sp1 but *NOT* specified in space %sp2.
1666
+ }
1667
+ case %it1, %it2 {
1668
+ // %coord is specified in *BOTH* spaces %sp1 and %sp2.
1669
+ }
1670
+ ```
1671
+
1672
+ `sparse_tensor.coiterate` can also operate on loop-carried variables.
1673
+ It returns the final values after loop termination.
1674
+ The initial values of the variables are passed as additional SSA operands
1675
+ to the iterator SSA value and used coordinate SSA values.
1676
+ Each operation region has variadic arguments for the specified (used) coordinates, one argument
1677
+ for each loop-carried variable, representing the value of the variable
1678
+ at the current iteration, followed by a list of arguments for iterators.
1679
+ The body region must contain exactly one block that terminates with
1680
+ `sparse_tensor.yield`.
1681
+
1682
+ The results of a `sparse_tensor.coiterate` hold the final values after
1683
+ the last iteration. If the `sparse_tensor.coiterate` defines any values,
1684
+ a yield must be explicitly present in every region defined in the operation.
1685
+ The number and types of the `sparse_tensor.coiterate` results must match
1686
+ the initial values in the iter_args binding and the yield operands.
1687
+
1688
+
1689
+ A `sparse_tensor.coiterate` example that does elementwise addition between two
1690
+ sparse vectors.
1691
+
1692
+
1693
+ ```mlir
1694
+ %ret = sparse_tensor.coiterate (%sp1, %sp2) at(%coord) iter_args(%arg = %init)
1695
+ : (!sparse_tensor.iter_space<#CSR, lvls = 0>,
1696
+ !sparse_tensor.iter_space<#CSR, lvls = 0>)
1697
+ -> tensor<?xindex, #CSR>
1698
+ case %it1, _ {
1699
+ // v = v1 + 0 = v1
1700
+ %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1701
+ %yield = sparse_tensor.insert %v1 into %arg[%coord]
1702
+ sparse_tensor.yield %yield
1703
+ }
1704
+ case _, %it2 {
1705
+ // v = v2 + 0 = v2
1706
+ %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1707
+ %yield = sparse_tensor.insert %v2 into %arg[%coord]
1708
+ sparse_tensor.yield %yield
1709
+ }
1710
+ case %it1, %it2 {
1711
+ // v = v1 + v2
1712
+ %v1 = sparse_tensor.extract_value %t1 at %it1 : index
1713
+ %v2 = sparse_tensor.extract_value %t2 at %it2 : index
1714
+ %v = arith.addi %v1, %v2 : index
1715
+ %yield = sparse_tensor.insert %v into %arg[%coord]
1716
+ sparse_tensor.yield %yield
1717
+ }
1718
+ ```
1719
+ }];
1720
+
1721
+ let arguments = (ins Variadic<AnySparseIterSpace>:$iterSpaces,
1722
+ Variadic<AnyType>:$initArgs,
1723
+ I64BitSetAttr:$crdUsedLvls,
1724
+ I64BitSetArrayAttr:$cases);
1725
+ let results = (outs Variadic<AnyType>:$results);
1726
+ let regions = (region VariadicRegion<SizedRegion<1>>:$caseRegions);
1727
+
1728
+ let extraClassDeclaration = [{
1729
+ unsigned getSpaceDim() {
1730
+ return llvm::cast<::mlir::sparse_tensor::IterSpaceType>(
1731
+ getIterSpaces().front().getType())
1732
+ .getSpaceDim();
1733
+ }
1734
+ I64BitSet getRegionDefinedSpace(unsigned regionIdx) {
1735
+ return I64BitSet(llvm::cast<IntegerAttr>(getCases()[regionIdx])
1736
+ .getValue().getZExtValue());
1737
+ }
1738
+ // The block arguments start with the referenced coordinates, followed by
1739
+ // user-provided iteration arguments and end with the iterators.
1740
+ Block::BlockArgListType getCrds(unsigned regionIdx) {
1741
+ return getRegion(regionIdx).getArguments()
1742
+ .take_front(getCrdUsedLvls().count());
1743
+ }
1744
+ unsigned getNumRegionIterArgs(unsigned regionIdx) {
1745
+ return getInitArgs().size();
1746
+ }
1747
+ Block::BlockArgListType getRegionIterArgs(unsigned regionIdx) {
1748
+ return getRegion(regionIdx).getArguments()
1749
+ .slice(getCrdUsedLvls().count(), getNumRegionIterArgs(regionIdx));
1750
+ }
1751
+ Block::BlockArgListType getRegionIterators(unsigned regionIdx) {
1752
+ return getRegion(regionIdx).getArguments()
1753
+ .take_back(getRegionDefinedSpace(regionIdx).count());
1754
+ }
1755
+ }];
1756
+
1757
+ // TODO:
1758
+ // let hasVerifier = 1;
1759
+ // let hasRegionVerifier = 1;
1760
+ // let hasCanonicalizer = 1;
1761
+ let hasCustomAssemblyFormat = 1;
1762
+ }
1763
+
1647
1764
//===----------------------------------------------------------------------===//
1648
1765
// Sparse Tensor Debugging and Test-Only Operations.
1649
1766
//===----------------------------------------------------------------------===//
0 commit comments