Remove graph pattern based partitioners #196

Closed
wants to merge 1 commit into from
1 change: 0 additions & 1 deletion backends/qnnpack/partition/TARGETS
@@ -29,7 +29,6 @@ runtime.python_library(
":support_patterns",
"//executorch/backends/qnnpack:qnnpack_preprocess",
"//executorch/backends/transforms:lib",
"//executorch/backends/xnnpack/partition:xnnpack_partitioner",
"//executorch/exir:delegate",
"//executorch/exir:lib",
"//executorch/exir/backend:partitioner",
117 changes: 114 additions & 3 deletions backends/qnnpack/partition/qnnpack_partitioner.py
@@ -4,6 +4,11 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Callable, Dict, List, Optional, Union

import torch

from executorch.backends.qnnpack.partition.support_patterns import (
    get_dynamic_quant_addmm_with_view_copy_graph,
    get_dynamic_quant_addmm_without_view_copy_graph,
@@ -14,9 +19,115 @@
from executorch.backends.transforms.addmm_mm_to_linear import (
    apply_addmm_mm_to_linear_transform,
)
from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
    _SingleOpDelegatePartitioner,
)
from executorch.exir.backend.partitioner import DelegationSpec, Partitioner
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


class _BasePartitioner(Partitioner):
    """
    Graph-based partitioner base for the QNNPACK backend.
    """

    def __init__(self, delegate_name, patterns):
        self.patterns = patterns

        self.delegation_spec = DelegationSpec(delegate_name, [])
        self.partition_tags: Dict[str, DelegationSpec] = {}

    @staticmethod
    def check_partitions(partitions: Union[dict, list]) -> None:
        """
        Warn users if there aren't any matches
        """
        pl = len(partitions)
        if pl == 0:
            log.warning("Nothing can be partitioned!")
        else:
            log.info(f"Found {pl} subgraphs to be partitioned.")

    def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
        raise NotImplementedError("This is not meant to be used directly.")
        return graph_module


class _SingleOpDelegatePartitioner(_BasePartitioner):
    """
    Graph-based partitioner base for a single "op" or "node" or a pattern match for the QNNPACK backend.
    This is tailored for DQLinear, where the QNNPACK delegate prefers to have a single DQLinear node in the graph.
    """

    def __init__(
        self,
        delegate_name,
        patterns,
        transforms: Optional[List[Callable[[torch.fx.Graph], torch.fx.Graph]]] = None,
    ):
        """
        @param transforms: Optional list of transforms that will be applied to the graph before running the partitioner.
        """
        super().__init__(delegate_name, patterns)
        self.transforms = transforms

    # override
    def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
        # TODO delete this since we are not allowed to do this
        if self.transforms is not None:
            for transform in self.transforms:  # pyre-ignore
                graph_module.graph = transform(graph_module.graph)

        matches = [
            match
            for matches in (
                SubgraphMatcher(pattern, ignore_literals=True).match(graph_module.graph)
                for pattern in self.patterns
            )
            for match in matches
        ]

        match_sets = [
            {
                node_in_graph
                for (node_in_pattern, node_in_graph) in match.nodes_map.items()
                if (
                    node_in_pattern.op != "placeholder"
                    and node_in_graph.op != "placeholder"
                )
            }
            for match in matches
        ]

        # Sort match sets in descending order of length so that any match sets
        # which are supersets of other match sets are processed first
        match_sets = sorted(match_sets, key=len, reverse=True)

        self.check_partitions(match_sets)

        # Mapping from delegation tag to match set
        tag_mapping = {}

        for (partition_id, match_set) in enumerate(match_sets):
            delegation_tag = f"tag{partition_id}"
            for node in match_set:
                if "delegation_tag" in node.meta:
                    # This node already has a delegation tag assigned.
                    # Check that the current match set is a subset of the one
                    # used to assign its delegation tag, then skip this match
                    # set. We have this check to ensure there are no pairs of
                    # match sets where they are overlapping but neither is a
                    # subset of the other.
                    if not match_set.issubset(tag_mapping[node.meta["delegation_tag"]]):
                        raise AssertionError(
                            f"Found match sets which are overlapping but neither is a subset of the other: {match_set}, {tag_mapping[node.meta['delegation_tag']]}"
                        )
                    break
                node.meta["delegation_tag"] = delegation_tag
                self.partition_tags[delegation_tag] = self.delegation_spec
                tag_mapping[delegation_tag] = match_set

        return graph_module


class QnnpackPartitioner(_SingleOpDelegatePartitioner):
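For readers skimming the diff, here is a minimal standalone sketch of the mechanism the copied-in _SingleOpDelegatePartitioner.partition relies on: torch.fx's SubgraphMatcher matches a small pattern graph against the model graph, and every non-placeholder node of each match is given a delegation tag. The pattern function and ToyModel below are illustrative assumptions, not ExecuTorch code.

# Minimal sketch (not part of this PR) of SubgraphMatcher-based tagging.
# "pattern" and "ToyModel" are made-up examples for illustration only.
import torch
import torch.nn.functional as F
from torch.fx import symbolic_trace
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher


def pattern(x, weight, bias):
    # The subgraph to look for: a single functional linear op.
    return F.linear(x, weight, bias)


class ToyModel(torch.nn.Module):
    def forward(self, x, w1, b1, w2, b2):
        # Two linear ops, so the matcher should report two matches.
        h = torch.relu(F.linear(x, w1, b1))
        return F.linear(h, w2, b2)


pattern_graph = symbolic_trace(pattern).graph
target = symbolic_trace(ToyModel())

matches = SubgraphMatcher(pattern_graph, ignore_literals=True).match(target.graph)

# Mirror the tagging scheme from the partitioner: one tag per match,
# applied to every matched node that is not a placeholder.
for partition_id, match in enumerate(matches):
    for pattern_node, graph_node in match.nodes_map.items():
        if pattern_node.op != "placeholder" and graph_node.op != "placeholder":
            graph_node.meta["delegation_tag"] = f"tag{partition_id}"

print([node.meta.get("delegation_tag") for node in target.graph.nodes])

In the partitioner above, the patterns come from the get_dynamic_quant_addmm_* support graphs, overlapping match sets are resolved by the subset check, and each tag is also recorded in partition_tags with a DelegationSpec so the backend can later claim those nodes.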
16 changes: 0 additions & 16 deletions backends/xnnpack/partition/TARGETS
@@ -1,20 +1,5 @@
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

runtime.python_library(
    name = "support_patterns",
    srcs = [
        "support_patterns.py",
    ],
    visibility = [
        "//executorch/...",
        "@EXECUTORCH_CLIENTS",
    ],
    deps = [
        "//caffe2:torch",
        "//executorch/exir/backend:utils",
    ],
)

runtime.python_library(
    name = "xnnpack_partitioner",
    srcs = [
@@ -26,7 +11,6 @@ runtime.python_library(
    ],
    deps = [
        ":configs",
        ":support_patterns",
        "//executorch/backends/xnnpack:xnnpack_preprocess",
        "//executorch/exir:delegate",
        "//executorch/exir:lib",