Skip to content

Commit 9b66194

Browse files
digantdesai
authored and facebook-github-bot committed
Remove graph pattern based partitioners (#196)
Summary: Since we migrated to source_partition based logic for partitioning we don't need graph patterns anymore. If something goes wrong with the source_partition we will revive this but until then no need for this. Moves DQ partitioner to QNNPACK since it is still using it. Reviewed By: mcr229 Differential Revision: D48766742
1 parent 9ca4ed0 commit 9b66194

File tree

10 files changed

+130
-929
lines changed

10 files changed

+130
-929
lines changed

backends/qnnpack/partition/TARGETS

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@ runtime.python_library(
2929
":support_patterns",
3030
"//executorch/backends/qnnpack:qnnpack_preprocess",
3131
"//executorch/backends/transforms:lib",
32-
"//executorch/backends/xnnpack/partition:xnnpack_partitioner",
3332
"//executorch/exir:delegate",
3433
"//executorch/exir:lib",
3534
"//executorch/exir/backend:partitioner",

backends/qnnpack/partition/qnnpack_partitioner.py

Lines changed: 114 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,11 @@
44
# This source code is licensed under the BSD-style license found in the
55
# LICENSE file in the root directory of this source tree.
66

7+
import logging
8+
from typing import Callable, Dict, List, Optional, Union
9+
10+
import torch
11+
712
from executorch.backends.qnnpack.partition.support_patterns import (
813
get_dynamic_quant_addmm_with_view_copy_graph,
914
get_dynamic_quant_addmm_without_view_copy_graph,
@@ -14,9 +19,115 @@
1419
from executorch.backends.transforms.addmm_mm_to_linear import (
1520
apply_addmm_mm_to_linear_transform,
1621
)
17-
from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
18-
_SingleOpDelegatePartitioner,
19-
)
22+
from executorch.exir.backend.partitioner import DelegationSpec, Partitioner
23+
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher
24+
25+
logging.basicConfig(level=logging.INFO)
26+
log = logging.getLogger(__name__)
27+
28+
29+
class _BasePartitioner(Partitioner):
30+
"""
31+
Graph based partitioner base for on QNNPACK backend.
32+
"""
33+
34+
def __init__(self, delegate_name, patterns):
35+
self.patterns = patterns
36+
37+
self.delegation_spec = DelegationSpec(delegate_name, [])
38+
self.partition_tags: Dict[str, DelegationSpec] = {}
39+
40+
@staticmethod
41+
def check_partitions(partitions: Union[dict, list]) -> None:
42+
"""
43+
Warn users if there aren't any matches
44+
"""
45+
pl = len(partitions)
46+
if pl == 0:
47+
log.warning("Nothing can be partitioned!")
48+
else:
49+
log.info(f"Found {pl} subgraphs to be partitioned.")
50+
51+
def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
52+
raise NotImplementedError("This is not meant to be used directly.")
53+
return graph_module
54+
55+
56+
class _SingleOpDelegatePartitioner(_BasePartitioner):
57+
"""
58+
Graph based partitioner base for a single "op" or "node" or a pattern match for QNNPACK backend.
59+
This is tailored for DQLinear where QNNPACK delegates prefers to have a single DQLinear node in the graph.
60+
"""
61+
62+
def __init__(
63+
self,
64+
delegate_name,
65+
patterns,
66+
transforms: Optional[List[Callable[[torch.fx.Graph], torch.fx.Graph]]] = None,
67+
):
68+
"""
69+
@param transforms: Optional list of transforms that will be applied to the graph before running the partitioner.
70+
"""
71+
super().__init__(delegate_name, patterns)
72+
self.transforms = transforms
73+
74+
# override
75+
def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
76+
# TODO delete this since we are not allowed to do this
77+
if self.transforms is not None:
78+
for transform in self.transforms: # pyre-ignore
79+
graph_module.graph = transform(graph_module.graph)
80+
81+
matches = [
82+
match
83+
for matches in (
84+
SubgraphMatcher(pattern, ignore_literals=True).match(graph_module.graph)
85+
for pattern in self.patterns
86+
)
87+
for match in matches
88+
]
89+
90+
match_sets = [
91+
{
92+
node_in_graph
93+
for (node_in_pattern, node_in_graph) in match.nodes_map.items()
94+
if (
95+
node_in_pattern.op != "placeholder"
96+
and node_in_graph.op != "placeholder"
97+
)
98+
}
99+
for match in matches
100+
]
101+
102+
# Sort match sets in descending order of length so that any match sets
103+
# which are supersets of other match sets are processed first
104+
match_sets = sorted(match_sets, key=len, reverse=True)
105+
106+
self.check_partitions(match_sets)
107+
108+
# Mapping from delegation tag to match set
109+
tag_mapping = {}
110+
111+
for (partition_id, match_set) in enumerate(match_sets):
112+
delegation_tag = f"tag{partition_id}"
113+
for node in match_set:
114+
if "delegation_tag" in node.meta:
115+
# This node already has delegation tag assigned.
116+
# Check that the current match set is a subset of the one
117+
# used to assign its delegation tag, then skip this match
118+
# set. We have this check to ensure there are no pairs of
119+
# match sets where they are overlapping but neither is a
120+
# subset of the other.
121+
if not match_set.issubset(tag_mapping[node.meta["delegation_tag"]]):
122+
raise AssertionError(
123+
f"Found match sets which are overlapping but neither is a subset of the other: {match_set}, {tag_mapping[node.meta['delegation_tag']]}"
124+
)
125+
break
126+
node.meta["delegation_tag"] = delegation_tag
127+
self.partition_tags[delegation_tag] = self.delegation_spec
128+
tag_mapping[delegation_tag] = match_set
129+
130+
return graph_module
20131

21132

22133
class QnnpackPartitioner(_SingleOpDelegatePartitioner):

backends/xnnpack/partition/TARGETS

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,5 @@
11
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
22

3-
runtime.python_library(
4-
name = "support_patterns",
5-
srcs = [
6-
"support_patterns.py",
7-
],
8-
visibility = [
9-
"//executorch/...",
10-
"@EXECUTORCH_CLIENTS",
11-
],
12-
deps = [
13-
"//caffe2:torch",
14-
"//executorch/exir/backend:utils",
15-
],
16-
)
17-
183
runtime.python_library(
194
name = "xnnpack_partitioner",
205
srcs = [
@@ -26,7 +11,6 @@ runtime.python_library(
2611
],
2712
deps = [
2813
":configs",
29-
":support_patterns",
3014
"//executorch/backends/xnnpack:xnnpack_preprocess",
3115
"//executorch/exir:delegate",
3216
"//executorch/exir:lib",

0 commit comments

Comments
 (0)