// RUN: mlir-opt %s --sparse-compiler | \
// RUN: TENSOR0="%mlir_src_dir/test/Integration/data/wide.mtx" \
// RUN: TENSOR1="%mlir_src_dir/test/Integration/data/mttkrp_b.tns" \
// RUN: mlir-cpu-runner \
// RUN:  -e entry -entry-point-result=void \
// RUN:  -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s

!Filename = !llvm.ptr<i8>

#SortedCOO = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ]
}>

#SortedCOOPermuted = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>

#SortedCOO3D = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ]
}>

#SortedCOO3DPermuted = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed-nu", "singleton-nu", "singleton" ],
  dimOrdering = affine_map<(i,j,k) -> (k,i,j)>
}>
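
// In these encodings, level 0 is "compressed-nu" (a non-unique compressed
// level whose pointer array simply brackets all entries as [0, nnz]) and
// the remaining levels are "singleton" (one coordinate per entry). The net
// effect is classic coordinate (COO) storage: one index array per level,
// kept in lexicographic order of the (possibly permuted) dimensions, plus
// a parallel array of values.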

#trait_scale = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)> // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = X(i,j) * 2.0"
}

//
// Tests reading a matrix and a tensor from file into the sorted COO
// formats declared above, as well as applying various operations to
// tensors stored in these formats.
//
module {

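  // The tensor filenames are supplied through the TENSOR0/TENSOR1
  // environment variables set in the RUN lines above; getTensorFilename
  // (provided by the C runner utils support library) maps index i to
  // the TENSOR<i> variable.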
  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // A kernel that scales a sparse matrix A by a factor of 2.0.
  //
  func.func @sparse_scale(%argx: tensor<?x?xf64, #SortedCOO>)
      -> tensor<?x?xf64, #SortedCOO> {
    %c = arith.constant 2.0 : f64
    %0 = linalg.generic #trait_scale
      outs(%argx: tensor<?x?xf64, #SortedCOO>) {
        ^bb(%x: f64):
          %1 = arith.mulf %x, %c : f64
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #SortedCOO>
    return %0 : tensor<?x?xf64, #SortedCOO>
  }
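
  //
  // An illustrative sketch only: this extra kernel is an editorial
  // addition (hypothetical, written in the style of #trait_scale above)
  // and is never called from @entry, so it does not affect the CHECK
  // output. It sums all elements of a sorted COO matrix into a 0-d tensor.
  //
  func.func @sparse_sum(%arga: tensor<?x?xf64, #SortedCOO>,
                        %argx: tensor<f64>) -> tensor<f64> {
    %0 = linalg.generic {
        indexing_maps = [
          affine_map<(i,j) -> (i,j)>, // A
          affine_map<(i,j) -> ()>     // x (out)
        ],
        iterator_types = ["reduction", "reduction"],
        doc = "x += A(i,j)"
      }
      ins(%arga: tensor<?x?xf64, #SortedCOO>)
      outs(%argx: tensor<f64>) {
        ^bb(%a: f64, %x: f64):
          %1 = arith.addf %x, %a : f64
          linalg.yield %1 : f64
    } -> tensor<f64>
    return %0 : tensor<f64>
  }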

  func.func @dumpi(%arg0: memref<?xindex>) {
    %c0 = arith.constant 0 : index
    %v = vector.transfer_read %arg0[%c0], %c0: memref<?xindex>, vector<20xindex>
    vector.print %v : vector<20xindex>
    return
  }

  func.func @dumpf(%arg0: memref<?xf64>) {
    %c0 = arith.constant 0 : index
    %nan = arith.constant 0x7FF0000001000000 : f64
    %v = vector.transfer_read %arg0[%c0], %nan: memref<?xf64>, vector<20xf64>
    vector.print %v : vector<20xf64>
    return
  }
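
  // Note that both dump helpers read a fixed vector<20> from buffers that
  // may hold fewer than 20 elements; lanes past the end of the memref are
  // filled with the padding operand (0 for indices, NaN for values), which
  // accounts for the trailing 0 and nan entries in the CHECK lines below.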

  func.func @entry() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index

    %fileName0 = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %fileName1 = call @getTensorFilename(%c1) : (index) -> (!Filename)

    // Read the sparse tensors from file, construct sparse storage.
    %0 = sparse_tensor.new %fileName0 : !Filename to tensor<?x?xf64, #SortedCOO>
    %1 = sparse_tensor.new %fileName0 : !Filename to tensor<?x?xf64, #SortedCOOPermuted>
    %2 = sparse_tensor.new %fileName1 : !Filename to tensor<?x?x?xf64, #SortedCOO3D>
    %3 = sparse_tensor.new %fileName1 : !Filename to tensor<?x?x?xf64, #SortedCOO3DPermuted>

    // Conversion from literal.
    %m = arith.constant sparse<
       [ [0,0], [1,3], [2,0], [2,3], [3,1], [4,1] ],
         [6.0, 5.0, 4.0, 3.0, 2.0, 11.0 ]
    > : tensor<5x4xf64>
    %4 = sparse_tensor.convert %m : tensor<5x4xf64> to tensor<?x?xf64, #SortedCOO>
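
    // Note: the conversion materializes the six nonzeros of %m in
    // lexicographic index order (the order they are written above) into
    // the same sorted COO storage that is dumped for %4 below.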

    //
    // CHECK: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255, 0, 0, 0 )
    // CHECK-NEXT: ( -1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12, -13, 14, -15, 16, -17, nan, nan, nan )
    //
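    // In each dump group below: the first line is the level-0 pointer
    // array [0, nnz], the middle lines are the per-level index arrays,
    // and the last line is the parallel values array.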
    %p0 = sparse_tensor.pointers %0 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i00 = sparse_tensor.indices %0 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i01 = sparse_tensor.indices %0 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v0 = sparse_tensor.values %0
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p0) : (memref<?xindex>) -> ()
    call @dumpi(%i00) : (memref<?xindex>) -> ()
    call @dumpi(%i01) : (memref<?xindex>) -> ()
    call @dumpf(%v0) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 3, 98, 126, 126, 127, 127, 128, 249, 253, 253, 254, 255, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 1, 3, 2, 3, 3, 0, 3, 0, 3, 3, 3, 1, 3, 0, 3, 0, 0, 0 )
    // CHECK-NEXT: ( -1, 8, -5, -9, -7, 10, -11, 2, 12, -3, -13, 14, -15, 6, 16, 4, -17, nan, nan, nan )
    //
    %p1 = sparse_tensor.pointers %1 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %i10 = sparse_tensor.indices %1 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %i11 = sparse_tensor.indices %1 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xindex>
    %v1 = sparse_tensor.values %1
      : tensor<?x?xf64, #SortedCOOPermuted> to memref<?xf64>
    call @dumpi(%p1) : (memref<?xindex>) -> ()
    call @dumpi(%i10) : (memref<?xindex>) -> ()
    call @dumpi(%i11) : (memref<?xindex>) -> ()
    call @dumpf(%v1) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 1, 1, 2, 2, 2, 2, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 0, 1, 0, 1, 2, 3, 0, 1, 2, 0, 1, 2, 3, 0, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 3, 63, 11, 100, 66, 61, 13, 43, 77, 10, 46, 61, 53, 3, 75, 22, 18, nan, nan, nan )
    //
    %p2 = sparse_tensor.pointers %2 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i20 = sparse_tensor.indices %2 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i21 = sparse_tensor.indices %2 { dimension = 1 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %i22 = sparse_tensor.indices %2 { dimension = 2 : index }
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xindex>
    %v2 = sparse_tensor.values %2
      : tensor<?x?x?xf64, #SortedCOO3D> to memref<?xf64>
    call @dumpi(%p2) : (memref<?xindex>) -> ()
    call @dumpi(%i20) : (memref<?xindex>) -> ()
    call @dumpi(%i21) : (memref<?xindex>) -> ()
    call @dumpi(%i22) : (memref<?xindex>) -> ()
    call @dumpf(%v2) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 2, 0, 1, 1, 2, 1, 2, 0, 1, 2, 0, 1, 2, 0, 2, 0, 1, 0, 0, 0 )
    // CHECK-NEXT: ( 66, 77, 61, 11, 61, 53, 22, 3, 100, 13, 10, 3, 18, 63, 43, 46, 75, nan, nan, nan )
    //
    %p3 = sparse_tensor.pointers %3 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i30 = sparse_tensor.indices %3 { dimension = 0 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i31 = sparse_tensor.indices %3 { dimension = 1 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %i32 = sparse_tensor.indices %3 { dimension = 2 : index }
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xindex>
    %v3 = sparse_tensor.values %3
      : tensor<?x?x?xf64, #SortedCOO3DPermuted> to memref<?xf64>
    call @dumpi(%p3) : (memref<?xindex>) -> ()
    call @dumpi(%i30) : (memref<?xindex>) -> ()
    call @dumpi(%i31) : (memref<?xindex>) -> ()
    call @dumpi(%i32) : (memref<?xindex>) -> ()
    call @dumpf(%v3) : (memref<?xf64>) -> ()

    //
    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 6, 5, 4, 3, 2, 11, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
    //
    %p4 = sparse_tensor.pointers %4 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i40 = sparse_tensor.indices %4 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i41 = sparse_tensor.indices %4 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v4 = sparse_tensor.values %4
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p4) : (memref<?xindex>) -> ()
    call @dumpi(%i40) : (memref<?xindex>) -> ()
    call @dumpi(%i41) : (memref<?xindex>) -> ()
    call @dumpf(%v4) : (memref<?xf64>) -> ()

    // And last but not least, an actual operation applied to COO.
    // Note that this performs the operation "in place" on the storage
    // of %4, which is why %5 shares that storage and only %4 needs to
    // be deallocated at the end.
    %5 = call @sparse_scale(%4) : (tensor<?x?xf64, #SortedCOO>) -> tensor<?x?xf64, #SortedCOO>

    //
    // CHECK-NEXT: ( 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 1, 2, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 0, 3, 0, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: ( 12, 10, 8, 6, 4, 22, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan )
    //
    %p5 = sparse_tensor.pointers %5 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i50 = sparse_tensor.indices %5 { dimension = 0 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %i51 = sparse_tensor.indices %5 { dimension = 1 : index }
      : tensor<?x?xf64, #SortedCOO> to memref<?xindex>
    %v5 = sparse_tensor.values %5
      : tensor<?x?xf64, #SortedCOO> to memref<?xf64>
    call @dumpi(%p5) : (memref<?xindex>) -> ()
    call @dumpi(%i50) : (memref<?xindex>) -> ()
    call @dumpi(%i51) : (memref<?xindex>) -> ()
    call @dumpf(%v5) : (memref<?xf64>) -> ()

    // Release the resources.
    bufferization.dealloc_tensor %0 : tensor<?x?xf64, #SortedCOO>
    bufferization.dealloc_tensor %1 : tensor<?x?xf64, #SortedCOOPermuted>
    bufferization.dealloc_tensor %2 : tensor<?x?x?xf64, #SortedCOO3D>
    bufferization.dealloc_tensor %3 : tensor<?x?x?xf64, #SortedCOO3DPermuted>
    bufferization.dealloc_tensor %4 : tensor<?x?xf64, #SortedCOO>

    return
  }
}