@@ -102,17 +102,16 @@ func.func @tensor.cast.unranked(%a : tensor<*xf32>, %b : tensor<*xf32>, %c : ten
 // -----
 
 // CHECK-LABEL: func @linalg_effects(
-// CHECK-SAME: %[[A:[a-z0-9]*]]: tensor<?x?xf32>
-// CHECK-SAME: %[[B:[a-z0-9]*]]: memref<?x?xf32>
-// CHECK-SAME: %[[C:[a-z0-9]*]]: tensor<?x?xf32>
-func.func @linalg_effects(%a : tensor<?x?xf32>, %b : memref<?x?xf32>, %c : tensor<?x?xf32>) {
+func.func @linalg_effects(
+    %a : tensor<?x?xf32>, %b : tensor<?x?xf32>, %c : tensor<?x?xf32>,
+    %d : memref<?x?xf32>, %e : memref<?x?xf32>, %f : memref<?x?xf32>) {
   // CHECK-NOT: %{{.*}} = linalg.matmul
-  %t = linalg.matmul ins(%a, %b : tensor<?x?xf32>, memref<?x?xf32>)
+  %t = linalg.matmul ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
                      outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
 
   // CHECK: linalg.matmul
-  linalg.matmul ins(%a, %c : tensor<?x?xf32>, tensor<?x?xf32>)
-                outs(%b : memref<?x?xf32>)
+  linalg.matmul ins(%d, %e : memref<?x?xf32>, memref<?x?xf32>)
+                outs(%f : memref<?x?xf32>)
   return
 }
 
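A minimal sketch of the property this hunk exercises, assuming the usual mlir-opt -canonicalize run (the function name below is illustrative, not part of the patch): a linalg.matmul on tensors has no memory effects, so when its result is unused the op is trivially dead and the canonicalizer erases it, which is what the CHECK-NOT expects, while the memref form writes through its outs operand and therefore survives, matching the plain CHECK.

// Hypothetical reproducer, not taken from the test file.
func.func @dead_tensor_matmul(%a : tensor<?x?xf32>, %c : tensor<?x?xf32>) {
  // %t has no users and the op has no memory effects, so canonicalization erases it.
  %t = linalg.matmul ins(%a, %a : tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
  return
}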
@@ -889,39 +888,38 @@ func.func @fold_multi_use_generic_op_with_consumer(%arg0 : tensor<?x?x?xf32>) ->
 // -----
 
 #map = affine_map<(d0) -> (d0)>
-func.func @identity_mixed(%arg0 : tensor<?xf32>, %arg1: memref<?xf32>) {
+func.func @identity_buffer(%arg0 : memref<?xf32>, %arg1: memref<?xf32>) {
   linalg.generic {
     indexing_maps = [#map, #map],
     iterator_types = ["parallel"]
-  } ins(%arg0 : tensor<?xf32>)
+  } ins(%arg0 : memref<?xf32>)
     outs(%arg1 : memref<?xf32>) {
   ^bb0(%arg2 : f32, %arg3 : f32):
     linalg.yield %arg2 : f32
   }
   return
 }
 
-// There was a crash in EraseIdentityGenericOp for generic with mixed semantics.
-// For now, check generic remained unchanged.
-// CHECK-LABEL: func @identity_mixed
-// CHECK-SAME: (%[[ARG1:.*]]: tensor<?xf32>, %[[ARG2:.*]]: memref<?xf32>)
+// Do not erase ops with buffer semantics.
+// CHECK-LABEL: func @identity_buffer
+// CHECK-SAME: (%[[ARG1:.*]]: memref<?xf32>, %[[ARG2:.*]]: memref<?xf32>)
 // CHECK: linalg.generic {
 // CHECK-SAME: indexing_maps = [#map, #map],
 // CHECK-SAME: iterator_types = ["parallel"]
-// CHECK-SAME: } ins(%[[ARG1]] : tensor<?xf32>)
+// CHECK-SAME: } ins(%[[ARG1]] : memref<?xf32>)
 // CHECK-SAME: outs(%[[ARG2]] : memref<?xf32>) {
 
 // -----
 
 // Just make sure that we don't crash.
 
 // CHECK-LABEL: func @dedeplicate_regression_test
-func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: memref<4xf32>) {
+func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: tensor<4xf32>) {
   %36 = linalg.generic
     {indexing_maps = [affine_map<(d0) -> (d0)>,
                       affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
     iterator_types = ["parallel"]}
-    ins(%1, %1 : memref<4xf32>, memref<4xf32>)
+    ins(%1, %1 : tensor<4xf32>, tensor<4xf32>)
     outs(%0 : tensor<4xf32>) {
   ^bb0(%in: f32, %in_24: f32, %out: f32):
     linalg.yield %in : f32
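The identity_buffer change above hinges on the EraseIdentityGenericOp pattern named in the removed comment: an identity linalg.generic is only folded away under pure tensor semantics, where its result can simply be replaced by the ins operand, while the buffer form copies into a distinct memref and so must be kept. A sketch of the tensor case that does fold, assuming mlir-opt -canonicalize (the names are illustrative):

#id = affine_map<(d0) -> (d0)>
// The generic yields its input unchanged under identity maps, so
// canonicalization replaces %0 with %arg0 and erases the op.
func.func @identity_tensor(%arg0 : tensor<?xf32>, %init : tensor<?xf32>) -> tensor<?xf32> {
  %0 = linalg.generic {
    indexing_maps = [#id, #id],
    iterator_types = ["parallel"]
  } ins(%arg0 : tensor<?xf32>)
    outs(%init : tensor<?xf32>) {
  ^bb0(%in : f32, %out : f32):
    linalg.yield %in : f32
  } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}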
@@ -937,31 +935,6 @@ func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: memref<4xf32>) {
 
 // -----
 
-#map = affine_map<(d0) -> (d0)>
-func.func @cast_producer_mixed(%arg0 : tensor<5xf32>, %arg1: memref<?xf32>) {
-  %0 = tensor.cast %arg0 : tensor<5xf32> to tensor<?xf32>
-  linalg.generic {
-    indexing_maps = [#map, #map],
-    iterator_types = ["parallel"]
-  } ins(%0 : tensor<?xf32>)
-    outs(%arg1 : memref<?xf32>) {
-  ^bb0(%arg2 : f32, %arg3 : f32):
-    linalg.yield %arg2 : f32
-  }
-  return
-}
-
-// We need a mixed linalg as a bridge between tensor and memref worlds.
-// CHECK-LABEL: func @cast_producer_mixed
-// CHECK-SAME: (%[[ARG1:.*]]: tensor<5xf32>, %[[ARG2:.*]]: memref<?xf32>)
-// CHECK: linalg.generic {
-// CHECK-SAME: indexing_maps = [#map, #map],
-// CHECK-SAME: iterator_types = ["parallel"]
-// CHECK-SAME: } ins(%[[ARG1]] : tensor<5xf32>)
-// CHECK-SAME: outs(%[[ARG2]] : memref<?xf32>) {
-
-// -----
-
 // CHECK-LABEL: dead_softmax
 func.func @dead_softmax(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
   %0 = tensor.empty() : tensor<16x64x256xf32>