Commit ca32f71

digantdesai authored and facebook-github-bot committed
Remove graph pattern based partitioners
Summary:
Since we migrated to source_partition based logic for partitioning, we don't need graph patterns anymore. If something goes wrong with source_partition we will revive this, but until then there is no need for it.

Moves the DQ partitioner to QNNPACK since it is still using it.

Reviewed By: mcr229

Differential Revision: D48766742

fbshipit-source-id: 4111ad35751f84858377dab10b14494225198a08
1 parent 8a738bc commit ca32f71
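
For context, "source_partition based logic" refers to torch.fx's source-partition utilities, which group nodes in an exported graph by the nn.Module (or function) they were traced from, rather than matching hand-written pattern graphs against lowered ops. The sketch below is not part of this commit; it is a minimal illustration of that mechanism, assuming a PyTorch 2.x torch.export capture, and TinyModel is a toy module invented only for this example.

# Minimal sketch (not from this commit) of source_fn-based partitioning,
# the mechanism that replaced the graph patterns removed here.
import torch
from torch.export import export
from torch.fx.passes.utils.source_matcher_utils import get_source_partitions


class TinyModel(torch.nn.Module):  # toy module, only for this illustration
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 4)

    def forward(self, x):
        return torch.relu(self.linear(x))


# Capture with torch.export so nodes carry metadata about their source module.
exported = export(TinyModel(), (torch.randn(2, 8),))

# Group nodes by the nn.Module they were traced from, instead of matching a
# hand-written pattern graph against the lowered ops.
partitions = get_source_partitions(exported.graph_module.graph, [torch.nn.Linear])
for source, parts in partitions.items():
    for part in parts:
        print(source, [node.name for node in part.nodes])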

File tree

9 files changed: +131 -928 lines changed


backends/qnnpack/partition/qnnpack_partitioner.py

Lines changed: 115 additions & 3 deletions

@@ -4,6 +4,11 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+import logging
+from typing import Callable, Dict, List, Optional, Union
+
+import torch
+
 from executorch.backends.qnnpack.partition.support_patterns import (
     get_dynamic_quant_addmm_with_view_copy_graph,
     get_dynamic_quant_addmm_without_view_copy_graph,
@@ -14,9 +19,116 @@
 from executorch.backends.transforms.addmm_mm_to_linear import (
     apply_addmm_mm_to_linear_transform,
 )
-from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
-    _SingleOpDelegatePartitioner,
-)
+from executorch.exir.backend.partitioner import DelegationSpec, Partitioner
+from torch.fx.passes.utils.matcher_utils import SubgraphMatcher
+
+logging.basicConfig(level=logging.INFO)
+log = logging.getLogger(__name__)
+
+
+class _BasePartitioner(Partitioner):
+    """
+    Graph based partitioner base for the XNNPACK backend.
+    """
+
+    def __init__(self, delegate_name, patterns):
+        self.patterns = patterns
+
+        self.delegation_spec = DelegationSpec(delegate_name, [])
+        self.partition_tags: Dict[str, DelegationSpec] = {}
+
+    @staticmethod
+    def check_partitions(partitions: Union[dict, list]) -> None:
+        """
+        Warn users if there aren't any matches.
+        """
+        pl = len(partitions)
+        if pl == 0:
+            log.warning("Nothing can be partitioned!")
+        else:
+            log.info(f"Found {pl} subgraphs to be partitioned.")
+
+    def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
+        raise NotImplementedError("This is not meant to be used directly.")
+        return graph_module
+
+
+class _SingleOpDelegatePartitioner(_BasePartitioner):
+    """
+    Graph based partitioner base for a single "op" or "node" or a pattern match for the XNNPACK backend.
+    This is tailored for DQLinear, where the XNNPACK (and also QNNPACK) delegates prefer to have a single DQLinear node in the graph.
+    This is a base class given XNNPACK and QNNPACK currently share this.
+    """
+
+    def __init__(
+        self,
+        delegate_name,
+        patterns,
+        transforms: Optional[List[Callable[[torch.fx.Graph], torch.fx.Graph]]] = None,
+    ):
+        """
+        @param transforms: Optional list of transforms that will be applied to the graph before running the partitioner.
+        """
+        super().__init__(delegate_name, patterns)
+        self.transforms = transforms
+
+    # override
+    def partition(self, graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
+        # TODO delete this since we are not allowed to do this
+        if self.transforms is not None:
+            for transform in self.transforms:  # pyre-ignore
+                graph_module.graph = transform(graph_module.graph)
+
+        matches = [
+            match
+            for matches in (
+                SubgraphMatcher(pattern, ignore_literals=True).match(graph_module.graph)
+                for pattern in self.patterns
+            )
+            for match in matches
+        ]
+
+        match_sets = [
+            {
+                node_in_graph
+                for (node_in_pattern, node_in_graph) in match.nodes_map.items()
+                if (
+                    node_in_pattern.op != "placeholder"
+                    and node_in_graph.op != "placeholder"
+                )
+            }
+            for match in matches
+        ]
+
+        # Sort match sets in descending order of length so that any match sets
+        # which are supersets of other match sets are processed first
+        match_sets = sorted(match_sets, key=len, reverse=True)
+
+        self.check_partitions(match_sets)
+
+        # Mapping from delegation tag to match set
+        tag_mapping = {}
+
+        for (partition_id, match_set) in enumerate(match_sets):
+            delegation_tag = f"tag{partition_id}"
+            for node in match_set:
+                if "delegation_tag" in node.meta:
+                    # This node already has a delegation tag assigned.
+                    # Check that the current match set is a subset of the one
+                    # used to assign its delegation tag, then skip this match
+                    # set. We have this check to ensure there are no pairs of
                    # match sets where they are overlapping but neither is a
+                    # subset of the other.
+                    if not match_set.issubset(tag_mapping[node.meta["delegation_tag"]]):
+                        raise AssertionError(
+                            f"Found match sets which are overlapping but neither is a subset of the other: {match_set}, {tag_mapping[node.meta['delegation_tag']]}"
+                        )
+                    break
+                node.meta["delegation_tag"] = delegation_tag
+                self.partition_tags[delegation_tag] = self.delegation_spec
+                tag_mapping[delegation_tag] = match_set
+
+        return graph_module
 
 
 class QnnpackPartitioner(_SingleOpDelegatePartitioner):
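
For readers unfamiliar with the pattern-matching path that is kept alive here for QNNPACK, the following is a minimal, self-contained sketch (not from this commit) of how torch.fx's SubgraphMatcher is used by partition() above: trace a pattern graph, match it against a target graph, and keep the non-placeholder graph nodes of each match. The pattern and target functions are toy examples invented for illustration.

# Minimal sketch of the SubgraphMatcher mechanism used by _SingleOpDelegatePartitioner.
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher


def pattern(x, mat, bias):
    # Pattern graph to search for: a single addmm, as a lowered linear produces.
    return torch.addmm(bias, x, mat)


def target(x, mat, bias):
    # Target graph containing the pattern plus an extra op.
    y = torch.addmm(bias, x, mat)
    return torch.relu(y)


pattern_graph = symbolic_trace(pattern).graph
target_graph = symbolic_trace(target).graph

matches = SubgraphMatcher(pattern_graph, ignore_literals=True).match(target_graph)
for match in matches:
    # nodes_map maps pattern nodes -> target-graph nodes; drop placeholders,
    # exactly as partition() above does when building each match set.
    match_set = {
        graph_node
        for pattern_node, graph_node in match.nodes_map.items()
        if pattern_node.op != "placeholder" and graph_node.op != "placeholder"
    }
    print(sorted(node.name for node in match_set))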

backends/xnnpack/partition/TARGETS

Lines changed: 0 additions & 16 deletions

@@ -1,20 +1,5 @@
 load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
 
-runtime.python_library(
-    name = "support_patterns",
-    srcs = [
-        "support_patterns.py",
-    ],
-    visibility = [
-        "//executorch/...",
-        "@EXECUTORCH_CLIENTS",
-    ],
-    deps = [
-        "//caffe2:torch",
-        "//executorch/exir/backend:utils",
-    ],
-)
-
 runtime.python_library(
     name = "xnnpack_partitioner",
     srcs = [
@@ -26,7 +11,6 @@ runtime.python_library(
     ],
     deps = [
         ":configs",
-        ":support_patterns",
         "//executorch/backends/xnnpack:xnnpack_preprocess",
         "//executorch/exir:delegate",
         "//executorch/exir:lib",