@@ -624,11 +624,11 @@ def SparseTensor_InsertOp : SparseTensor_Op<"insert",
string summary = "Inserts a value into the sparse tensor";
string description = [{
Inserts the value into the underlying storage of the tensor at the
- given level-coordinates. The arity of `lvlCoords` must match the
- level-rank of the tensor. This operation can only be applied when
- the tensor materializes unintialized from a `bufferization.alloc_tensor`
- operation and the final tensor is constructed with a `load` operation
- which has the `hasInserts` attribute set.
+ given level-coordinates. The arity of `lvlCoords` must match the
+ level-rank of the tensor. This operation can only be applied when
+ the tensor materializes uninitialized from a `tensor.empty` operation
+ and the final tensor is constructed with a `load` operation which
+ has the `hasInserts` attribute set.

The level-properties of the sparse tensor type fully describe what
kind of insertion order is allowed. When all levels have "unique"
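
For reference, the updated constraint (materialize an uninitialized tensor with `tensor.empty`, insert at level-coordinates, then finalize with a `load` that has `hasInserts` set) corresponds to a usage pattern roughly like the sketch below. The `#SparseVector` encoding, shapes, coordinates, and constant values are illustrative assumptions, not part of this change.

```mlir
// Minimal sketch (assumed #SparseVector encoding and constants).
%c10 = arith.constant 10 : index
%c20 = arith.constant 20 : index
%f1  = arith.constant 1.1 : f64
%f2  = arith.constant 2.2 : f64
// Materialize the destination uninitialized.
%0 = tensor.empty() : tensor<1024xf64, #SparseVector>
// Insert values at level-coordinates; the arity matches the level-rank (1).
%1 = sparse_tensor.insert %f1 into %0[%c10] : tensor<1024xf64, #SparseVector>
%2 = sparse_tensor.insert %f2 into %1[%c20] : tensor<1024xf64, #SparseVector>
// Finalize the tensor; `hasInserts` signals that insertions took place.
%3 = sparse_tensor.load %2 hasInserts : tensor<1024xf64, #SparseVector>
```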
@@ -974,7 +974,7 @@ def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>,
Example of isEqual applied to intersecting elements only:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%0 = linalg.generic #trait
ins(%A: tensor<?xf64, #SparseVector>,
%B: tensor<?xf64, #SparseVector>)
@@ -996,7 +996,7 @@ def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>,
Example of A+B in upper triangle, A-B in lower triangle:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%1 = linalg.generic #trait
ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xf64, #CSR>
outs(%C: tensor<?x?xf64, #CSR> {
@@ -1029,7 +1029,7 @@ def SparseTensor_BinaryOp : SparseTensor_Op<"binary", [Pure]>,
because we never use its values, only its sparse structure:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%2 = linalg.generic #trait
ins(%A: tensor<?x?xf64, #CSR>, %B: tensor<?x?xi32, #CSR>
outs(%C: tensor<?x?xf64, #CSR> {
@@ -1069,7 +1069,9 @@ def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>,
Each region contains a single block describing the computation and result.
A non-empty block must end with a sparse_tensor.yield and the return type
must match the type of `output`. The primary region's block has one
- argument, while the missing region's block has zero arguments.
+ argument, while the missing region's block has zero arguments. The
+ absent region may only generate constants or values already computed
+ on entry of the `linalg.generic` operation.

A region may also be declared empty (i.e. `absent={}`), indicating that the
region does not contribute to the output.
@@ -1082,17 +1084,17 @@ def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>,
Example of A+1, restricted to existing elements:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...) : tensor<?xf64, #SparseVector>
%0 = linalg.generic #trait
ins(%A: tensor<?xf64, #SparseVector>)
outs(%C: tensor<?xf64, #SparseVector>) {
^bb0(%a: f64, %c: f64) :
%result = sparse_tensor.unary %a : f64 to f64
present={
- ^bb0(%arg0: f64):
- %cf1 = arith.constant 1.0 : f64
- %ret = arith.addf %arg0, %cf1 : f64
- sparse_tensor.yield %ret : f64
+ ^bb0(%arg0: f64):
+ %cf1 = arith.constant 1.0 : f64
+ %ret = arith.addf %arg0, %cf1 : f64
+ sparse_tensor.yield %ret : f64
}
absent={}
linalg.yield %result : f64
@@ -1102,41 +1104,42 @@ def SparseTensor_UnaryOp : SparseTensor_Op<"unary", [Pure]>,
Example returning +1 for existing values and -1 for missing values:

```mlir
- %C = bufferization.alloc_tensor...
+ %p1 = arith.constant 1 : i32
+ %m1 = arith.constant -1 : i32
+ %C = tensor.empty(...) : tensor<?xi32, #SparseVector>
%1 = linalg.generic #trait
ins(%A: tensor<?xf64, #SparseVector>)
- outs(%C: tensor<?xf64 , #SparseVector>) {
- ^bb0(%a: f64, %c: f64 ) :
+ outs(%C: tensor<?xi32 , #SparseVector>) {
+ ^bb0(%a: f64, %c: i32 ) :
%result = sparse_tensor.unary %a : f64 to i32
present={
^bb0(%x: f64):
- %ret = arith.constant 1 : i32
- sparse_tensor.yield %ret : i32
- }
- absent={
- %ret = arith.constant -1 : i32
- sparse_tensor.yield %ret : i32
- }
- linalg.yield %result : f64
- } -> tensor<?xf64, #SparseVector>
+ sparse_tensor.yield %p1 : i32
+ }
+ absent={
+ sparse_tensor.yield %m1 : i32
+ }
+ linalg.yield %result : i32
+ } -> tensor<?xi32, #SparseVector>
```

Example showing a structural inversion (existing values become missing in
the output, while missing values are filled with 1):

```mlir
- %C = bufferization.alloc_tensor...
+ %c1 = arith.constant 1 : i64
+ %C = tensor.empty(...) : tensor<?xi64, #SparseVector>
%2 = linalg.generic #trait
- ins(%A: tensor<?xf64, #SparseVector>)
- outs(%C: tensor<?xf64 , #SparseVector>) {
- %result = sparse_tensor.unary %a : f64 to i64
- present={}
- absent={
- %ret = arith.constant 1 : i64
- sparse_tensor.yield %ret : i64
- }
- linalg.yield %result : f64
- } -> tensor<?xf64 , #SparseVector>
+ ins(%A: tensor<?xf64, #SparseVector>)
+ outs(%C: tensor<?xi64 , #SparseVector>) {
+ ^bb0(%a : f64, %c: i64) :
+ %result = sparse_tensor.unary %a : f64 to i64
+ present={}
+ absent={
+ sparse_tensor.yield %c1 : i64
+ }
+ linalg.yield %result : i64
+ } -> tensor<?xi64 , #SparseVector>
```
}];

@@ -1177,7 +1180,7 @@ def SparseTensor_ReduceOp : SparseTensor_Op<"reduce", [Pure, SameOperandsAndResu
```mlir
%cf1 = arith.constant 1.0 : f64
%cf100 = arith.constant 100.0 : f64
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%0 = linalg.generic #trait
ins(%A: tensor<?x?xf64, #SparseMatrix>)
outs(%C: tensor<?xf64, #SparseVector>) {
@@ -1220,7 +1223,7 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
Example of selecting A >= 4.0:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%0 = linalg.generic #trait
ins(%A: tensor<?xf64, #SparseVector>)
outs(%C: tensor<?xf64, #SparseVector>) {
@@ -1238,7 +1241,7 @@ def SparseTensor_SelectOp : SparseTensor_Op<"select", [Pure, SameOperandsAndResu
Example of selecting lower triangle of a matrix:

```mlir
- %C = bufferization.alloc_tensor ...
+ %C = tensor.empty( ...)
%1 = linalg.generic #trait
ins(%A: tensor<?x?xf64, #CSR>)
outs(%C: tensor<?x?xf64, #CSR>) {