[mlir][sparse] fix sparse tests that use reshape operations. #90637


Merged: 1 commit into llvm:main on Apr 30, 2024

Conversation

PeimingLiu (Member) commented Apr 30, 2024

Due to the generalization of tensor.expand_shape introduced in #90040, which made the output_shape operand explicit, the sparse tests that use reshape operations need updating.
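
For reference, the syntax change these tests adapt to (static case, as exercised in the diff below): tensor.expand_shape now spells out the expanded extents.

    // Before #90040:
    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
    // After #90040: the output extents are given explicitly.
    %0 = tensor.expand_shape %arg0 [[0, 1]] output_shape [3, 4] : tensor<12xf64> into tensor<3x4xf64>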

llvmbot (Member) commented Apr 30, 2024

@llvm/pr-subscribers-mlir-sparse

@llvm/pr-subscribers-mlir

Author: Peiming Liu (PeimingLiu)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/90637.diff

2 Files Affected:

  • (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir (+4-4)
  • (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir (+36-12)
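
For the dynamic-shape tests in sparse_expand_shape.mlir, output_shape must also carry SSA values for the ? extents, so each dynamic case now derives them from the input at runtime. The pattern, distilled from the hunks in the diff below (the constant 2 comes from the result type tensor<?x2x?xf64>):

    %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf64>   // first output extent
    %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf64>
    %d2 = arith.divui %d1, %c2 : index              // second source dim splits into 2 x %d2
    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%d0, 2, %d2]
        : tensor<?x?xf64> into tensor<?x2x?xf64>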
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
index f7975e0738fa81..73dddefb0e4aa5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/reshape_dot.mlir
@@ -44,7 +44,7 @@ module {
     %cst = arith.constant 0.000000e+00 : f32
     %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<5x6xf32>) -> tensor<5x6xf32>
     %2 = linalg.matmul ins(%arg0, %collapsed : tensor<5x6xf32>, tensor<6x6xf32, #COO_2D>) outs(%1 : tensor<5x6xf32>) -> tensor<5x6xf32>
-    %expanded = tensor.expand_shape %2 [[0], [1, 2]] : tensor<5x6xf32> into tensor<5x2x3xf32>
+    %expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [5,2,3]: tensor<5x6xf32> into tensor<5x2x3xf32>
     %ret1 = tensor.cast %expanded : tensor<5x2x3xf32> to tensor<?x?x?xf32>
 
     // Note: tensor.collapse_shape is a metadata-only operation on dense tensors
@@ -60,7 +60,7 @@ module {
     %cst = arith.constant 0.000000e+00 : f32
     %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<5x6xf32>) -> tensor<5x6xf32>
     %2 = linalg.matmul ins(%arg0, %collapsed : tensor<5x6xf32, #COO_2D>, tensor<6x6xf32, #COO_2D>) outs(%1 : tensor<5x6xf32>) -> tensor<5x6xf32>
-    %expanded = tensor.expand_shape %2 [[0], [1, 2]] : tensor<5x6xf32> into tensor<5x2x3xf32>
+    %expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [5,2,3]: tensor<5x6xf32> into tensor<5x2x3xf32>
     %ret1 = tensor.cast %expanded : tensor<5x2x3xf32> to tensor<?x?x?xf32>
 
     // Note: tensor.collapse_shape is a metadata-only operation on dense tensors
@@ -76,7 +76,7 @@ module {
     %cst = arith.constant 0.000000e+00 : f32
     %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<5x6xf32>) -> tensor<5x6xf32>
     %2 = linalg.matmul ins(%arg0, %collapsed : tensor<5x6xf32>, tensor<6x6xf32>) outs(%1 : tensor<5x6xf32>) -> tensor<5x6xf32>
-    %expanded = tensor.expand_shape %2 [[0], [1, 2]] : tensor<5x6xf32> into tensor<5x2x3xf32>
+    %expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [5,2,3]: tensor<5x6xf32> into tensor<5x2x3xf32>
     %ret1 = tensor.cast %expanded : tensor<5x2x3xf32> to tensor<?x?x?xf32>
     return %ret1 :  tensor<?x?x?xf32>
   }
@@ -88,7 +88,7 @@ module {
     %cst = arith.constant 0.000000e+00 : f32
     %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<5x6xf32>) -> tensor<5x6xf32>
     %2 = linalg.matmul ins(%arg0, %collapsed : tensor<5x6xf32, #COO_2D>, tensor<6x6xf32, #COO_2D>) outs(%1 : tensor<5x6xf32>) -> tensor<5x6xf32>
-    %expanded = tensor.expand_shape %2 [[0], [1, 2]] : tensor<5x6xf32> into tensor<5x2x3xf32>
+    %expanded = tensor.expand_shape %2 [[0], [1, 2]] output_shape [5,2,3]: tensor<5x6xf32> into tensor<5x2x3xf32>
     %ret1 = tensor.cast %expanded : tensor<5x2x3xf32> to tensor<?x?x?xf32>
 
     // Note: tensor.collapse_shape is a metadata-only operation on dense tensors
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
index 6679a81c74088b..3932424845763d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_expand_shape.mlir
@@ -53,62 +53,86 @@
 module {
 
   func.func @expand_dense(%arg0: tensor<12xf64>) -> tensor<3x4xf64> {
-    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64>
+    %0 = tensor.expand_shape %arg0 [[0, 1]] output_shape [3, 4] : tensor<12xf64> into tensor<3x4xf64>
     return %0 : tensor<3x4xf64>
   }
 
   func.func @expand_from_sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64> {
-    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
+    %0 = tensor.expand_shape %arg0 [[0, 1]] output_shape [3, 4] : tensor<12xf64, #SparseVector> into tensor<3x4xf64>
     return %0 : tensor<3x4xf64>
   }
 
   func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix> {
-    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
+    %0 = tensor.expand_shape %arg0 [[0, 1]] output_shape [3, 4] : tensor<12xf64> into tensor<3x4xf64, #SparseMatrix>
     return %0 : tensor<3x4xf64, #SparseMatrix>
   }
 
   func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
-    %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
+    %0 = tensor.expand_shape %arg0 [[0, 1]] output_shape [3, 4] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
     return %0 : tensor<3x4xf64, #SparseMatrix>
   }
 
   func.func @expand_dense_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64>
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [3, 2, 2] : tensor<3x4xf64> into tensor<3x2x2xf64>
     return %0 : tensor<3x2x2xf64>
   }
 
   func.func @expand_from_sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64>
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [3, 2, 2] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64>
     return %0 : tensor<3x2x2xf64>
   }
 
   func.func @expand_to_sparse_3x2x2(%arg0: tensor<3x4xf64>) -> tensor<3x2x2xf64, #Sparse3dTensor> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64> into tensor<3x2x2xf64, #Sparse3dTensor>
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [3, 2, 2] : tensor<3x4xf64> into tensor<3x2x2xf64, #Sparse3dTensor>
     return %0 : tensor<3x2x2xf64, #Sparse3dTensor>
   }
 
   func.func @expand_sparse2sparse_3x2x2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<3x2x2xf64, #Sparse3dTensor> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64, #Sparse3dTensor>
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [3, 2, 2] : tensor<3x4xf64, #SparseMatrix> into tensor<3x2x2xf64, #Sparse3dTensor>
     return %0 : tensor<3x2x2xf64, #Sparse3dTensor>
   }
 
   func.func @expand_dense_dyn(%arg0: tensor<?x?xf64>) -> tensor<?x2x?xf64> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64> into tensor<?x2x?xf64>
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c2 = arith.constant 2 : index
+    %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf64>
+    %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf64>
+    %d2 = arith.divui %d1, %c2 : index
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%d0, 2, %d2]  : tensor<?x?xf64> into tensor<?x2x?xf64>
     return %0 : tensor<?x2x?xf64>
   }
 
   func.func @expand_from_sparse_dyn(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x2x?xf64> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64, #SparseMatrix> into tensor<?x2x?xf64>
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c2 = arith.constant 2 : index
+    %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf64, #SparseMatrix>
+    %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf64, #SparseMatrix>
+    %d2 = arith.divui %d1, %c2 : index
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%d0, 2, %d2]  : tensor<?x?xf64, #SparseMatrix> into tensor<?x2x?xf64>
     return %0 : tensor<?x2x?xf64>
   }
 
   func.func @expand_to_sparse_dyn(%arg0: tensor<?x?xf64>) -> tensor<?x2x?xf64, #Sparse3dTensor> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64> into tensor<?x2x?xf64, #Sparse3dTensor>
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c2 = arith.constant 2 : index
+    %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf64>
+    %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf64>
+    %d2 = arith.divui %d1, %c2 : index
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%d0, 2, %d2]  : tensor<?x?xf64> into tensor<?x2x?xf64, #Sparse3dTensor>
     return %0 : tensor<?x2x?xf64, #Sparse3dTensor>
   }
 
   func.func @expand_sparse2sparse_dyn(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x2x?xf64, #Sparse3dTensor> {
-    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] : tensor<?x?xf64, #SparseMatrix> into tensor<?x2x?xf64, #Sparse3dTensor>
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %c2 = arith.constant 2 : index
+    %d0 = tensor.dim %arg0, %c0 : tensor<?x?xf64, #SparseMatrix>
+    %d1 = tensor.dim %arg0, %c1 : tensor<?x?xf64, #SparseMatrix>
+    %d2 = arith.divui %d1, %c2 : index
+    %0 = tensor.expand_shape %arg0 [[0], [1, 2]] output_shape [%d0, 2, %d2]  : tensor<?x?xf64, #SparseMatrix> into tensor<?x2x?xf64, #Sparse3dTensor>
     return %0 : tensor<?x2x?xf64, #Sparse3dTensor>
   }
 

PeimingLiu merged commit 7cbaaed into llvm:main on Apr 30, 2024
Labels: mlir:sparse (Sparse compiler in MLIR), mlir