
[mlir][sparse] add COO to python tests #70090


Merged: 1 commit, Oct 24, 2023

@@ -132,13 +132,15 @@ def main():
 
     # CHECK-LABEL: TEST: testSDDMMM
     print("\nTEST: testSDDMMM")
-    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
+        count = 0
         # Loop over various ways to compile and annotate the SDDMM kernel with
         # a *single* sparse tensor. Note that we deliberate do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -154,15 +156,15 @@ def main():
                     for iwidth in [32]:
                         for e in [True]:
                             attr = st.EncodingAttr.get(
-                                level, ordering, None, pwidth, iwidth
+                                level, ordering, ordering, pwidth, iwidth
                             )
                             opt = f"parallelization-strategy=none"
                             compiler = sparse_compiler.SparseCompiler(
                                 options=opt, opt_level=0, shared_libs=[support_lib]
                             )
                             build_compile_and_run_SDDMMM(attr, compiler)
                             count = count + 1
-    # CHECK: Passed 8 tests
+    # CHECK: Passed 10 tests
     print("Passed ", count, "tests")


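The newly added level specification, compressed_nu followed by singleton, is the level-by-level description of a 2-d COO tensor. Below is a minimal sketch of how such an encoding can be constructed with the sparse_tensor Python bindings, mirroring the st.EncodingAttr.get call in the test; the import paths, the identity permutation, and the zero pos/crd bitwidths are illustrative assumptions rather than values taken from the test.

from mlir import ir
from mlir.dialects import sparse_tensor as st

with ir.Context(), ir.Location.unknown():
    # 2-d COO: an outer compressed level whose coordinates need not be
    # unique (compressed_nu), followed by a singleton level.
    level = [st.DimLevelType.compressed_nu, st.DimLevelType.singleton]
    # For this simple ordering, dim2lvl and lvl2dim are the same permutation,
    # which is why the tests now pass `ordering` for both arguments.
    ordering = ir.AffineMap.get_permutation([0, 1])
    attr = st.EncodingAttr.get(level, ordering, ordering, 0, 0)
    print(attr)  # prints the resulting #sparse_tensor.encoding attribute
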
@@ -115,17 +115,18 @@ def main():
 
     # CHECK-LABEL: TEST: testSpMM
     print("\nTEST: testSpMM")
-    count = 0
     with ir.Context() as ctx, ir.Location.unknown():
+        count = 0
         # Loop over various ways to compile and annotate the SpMM kernel with
         # a *single* sparse tensor. Note that we deliberate do not exhaustively
         # search the full state space to reduce runtime of the test. It is
         # straightforward to adapt the code below to explore more combinations.
-
+        # For these simple orderings, dim2lvl and lvl2dim are the same.
         vl = 1
         e = False
         opt = f"parallelization-strategy=none"
         levels = [
+            [st.DimLevelType.compressed_nu, st.DimLevelType.singleton],
             [st.DimLevelType.dense, st.DimLevelType.dense],
             [st.DimLevelType.dense, st.DimLevelType.compressed],
             [st.DimLevelType.compressed, st.DimLevelType.dense],
@@ -144,11 +145,11 @@ def main():
                 for pwidth in bitwidths:
                     for iwidth in bitwidths:
                         attr = st.EncodingAttr.get(
-                            level, ordering, None, pwidth, iwidth
+                            level, ordering, ordering, pwidth, iwidth
                         )
                         build_compile_and_run_SpMM(attr, compiler)
                         count = count + 1
-    # CHECK: Passed 8 tests
+    # CHECK: Passed 10 tests
     print("Passed ", count, "tests")


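Both tests count one run per enumerated encoding, so the expected "Passed N tests" value is the product of the loop sizes; adding one row to levels grows it by the number of ordering/bitwidth combinations, which is why the CHECK lines move from 8 to 10. A toy sketch of that counting pattern, using hypothetical stand-in lists rather than the actual ones from the tests:

# Hypothetical stand-ins: five level rows (including the new COO row) and
# two orderings; the real tests build st.EncodingAttr values instead.
levels = [
    ["compressed_nu", "singleton"],  # newly added COO row
    ["dense", "dense"],
    ["dense", "compressed"],
    ["compressed", "dense"],
    ["compressed", "compressed"],
]
orderings = ["identity", "swapped"]
bitwidths = [0]

count = 0
for level in levels:
    for ordering in orderings:
        for pwidth in bitwidths:
            for iwidth in bitwidths:
                count = count + 1

# 5 rows x 2 combinations per row = 10; one fewer row gave the previous 8.
print("Passed ", count, "tests")
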
@@ -24,7 +24,7 @@ def __call__(self, module: ir.Module):
         self.compile(module)
 
     def compile(self, module: ir.Module):
-        """Compiles the module by invoking the sparse copmiler pipeline."""
+        """Compiles the module by invoking the sparse compiler pipeline."""
         passmanager.PassManager.parse(self.pipeline).run(module.operation)
 
     def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:
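
For context, the wrapper above is what the two tests drive: construct a SparseCompiler, run its pipeline over an MLIR module, then JIT the result. A rough usage sketch follows, assuming the in-tree tools/sparse_compiler.py module is importable and that a module has already been built elsewhere (for example by the build_compile_and_run_* helpers); only the calls already visible in the diffs are taken from the source.

import os

from mlir import ir
from tools import sparse_compiler  # assumes the test's tools/ directory is on sys.path


def compile_and_jit(module: ir.Module):
    """Runs the sparse compiler pipeline on `module` and returns a JIT engine."""
    support_lib = os.getenv("SUPPORT_LIB")  # runtime support library, as in the tests
    compiler = sparse_compiler.SparseCompiler(
        options="parallelization-strategy=none", opt_level=0, shared_libs=[support_lib]
    )
    compiler.compile(module)  # PassManager.parse(self.pipeline).run(module.operation)
    return compiler.jit(module)  # wraps the compiled module in an ExecutionEngine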