Skip to content

Commit b465cc4

Browse files
authored
Merge branch 'main' into gh/trivedivivek/111/orig
2 parents df05f80 + 50f7376 commit b465cc4

File tree

79 files changed

+4570
-635
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

79 files changed

+4570
-635
lines changed

.github/workflows/trunk.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -262,7 +262,7 @@ jobs:
262262
output=$(ls -la ${elf})
263263
arr=($output)
264264
size=${arr[4]}
265-
threshold="102400" # 100KiB
265+
threshold="103068" # ~100KiB
266266
echo "size: $size, threshold: $threshold"
267267
if [[ "$size" -le "$threshold" ]]; then
268268
echo "Success $size <= $threshold"

backends/arm/_passes/annotate_channels_last_dim_order_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,10 @@
3535
def _transpose_impl(*args, **kwargs):
3636
# Validate length of dim_order array
3737
dim = args[1]
38-
assert len(dim) in (4, 5)
38+
if len(dim) != 4 and len(dim) != 5:
39+
raise ValueError(
40+
f"Dim order length must be either 4 or 5, got {len(dim)}: {dim}"
41+
)
3942
# Pass-through in edge-IR
4043
return args[0]
4144

backends/arm/_passes/convert_split_to_slice.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,14 @@ def call(self, graph_module: torch.fx.GraphModule):
4141
dim = split_node.args[2] if len(split_node.args) > 2 else 0
4242
dim = (dim + rank) % rank
4343

44-
assert (
45-
sum(split_lengths) == shape[dim]
46-
), "Given split lengths don't sum up to the size of the dimension."
44+
# Validate that split lengths cover the entire dimension
45+
length_sum = sum(split_lengths)
46+
dim_size = shape[dim]
47+
if length_sum != dim_size:
48+
raise ValueError(
49+
f"Split sizes {split_lengths} sum to {length_sum}, "
50+
f"but dimension {dim} has size {dim_size}"
51+
)
4752

4853
# Convert split argument 'split_lengths' to slice arguments start and end.
4954
starts = [0] * len(split_lengths)

backends/arm/_passes/fold_qdq_with_annotated_qparams_pass.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,9 @@ def fold_and_annotate_arg(
120120
if input_qparams is not None:
121121
node.meta["input_qparams"][i] = input_qparams
122122
for n in nodes_to_remove:
123-
assert n.target == dq_op
123+
if n.target != dq_op:
124+
raise RuntimeError(f"Expected {dq_op} dq_op, got {n.target}")
125+
124126
n.replace_all_uses_with(n.args[0]) # type: ignore[arg-type]
125127
graph_module.graph.erase_node(n)
126128

@@ -136,14 +138,16 @@ def call(self, graph_module: GraphModule) -> PassResult:
136138
continue
137139

138140
# Make sure we haven't already set qparams meta information on the node
139-
assert "input_qparams" not in n.meta, (
140-
f'Unexpected key "input_qparams" found in meta for node {n}. '
141-
"input_qparams should not have been set at this point"
142-
)
143-
assert "output_qparams" not in n.meta, (
144-
f'Unexpected key "output_qparams" found in meta for node {n}. '
145-
"output_qparams should not have been set at this point"
146-
)
141+
if "input_qparams" in n.meta:
142+
raise RuntimeError(
143+
f'Unexpected key "input_qparams" found in meta for node {n}. '
144+
"input_qparams should not have been set at this point"
145+
)
146+
if "output_qparams" in n.meta:
147+
raise RuntimeError(
148+
f'Unexpected key "output_qparams" found in meta for node {n}. '
149+
"output_qparams should not have been set at this point"
150+
)
147151

148152
# for the inputs and outputs search the graph for quantization info and
149153
# store the information in a dict with order of the _tensor_ inputs as key,

backends/arm/_passes/insert_table_ops.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -240,8 +240,17 @@ def call(self, graph_module: GraphModule) -> PassResult:
240240
args=(node.args[0],),
241241
)
242242
output_node = table_node
243-
assert len(input_qparams) == 1
244-
assert len(output_qparams) == 1
243+
# Expect exactly one quantization parameter for input and output
244+
if len(input_qparams) != 1:
245+
raise ValueError(
246+
f"InsertTableOpsPass expected exactly one input quantization parameter, "
247+
f"got {len(input_qparams)} for node {node.name}"
248+
)
249+
if len(output_qparams) != 1:
250+
raise ValueError(
251+
f"InsertTableOpsPass expected exactly one output quantization parameter, "
252+
f"got {len(output_qparams)} for node {node.name}"
253+
)
245254

246255
# Generate table buffer and how much to lshift the table output.
247256
buffer, lshift = self.generate_table_values(

backends/arm/_passes/remove_clone_pass.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,5 +17,8 @@ def call_operator(self, op, args, kwargs, meta):
1717
if op != exir_ops.edge.aten.clone.default:
1818
return super().call_operator(op, args, kwargs, meta)
1919

20-
assert len(args) == 1
20+
if len(args) != 1:
21+
raise ValueError(
22+
f"clone operator expects exactly one argument, got {len(args)}"
23+
)
2124
return args[0]

backends/arm/operators/op_upsample_bilinear2d.py

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -49,15 +49,18 @@ def define_node(
4949
input_dtype = inputs[0].dtype
5050

5151
# tosa_shape output is NHWC, take HW
52-
input_size_yx = torch.tensor(
53-
tosa_shape(inputs[0].shape, inputs[0].dim_order)[1:3]
54-
)
55-
# Ignore scale and size parameters, directly use the output size as
56-
# we only support static shapes currently
57-
output_size_yx = torch.tensor(tosa_shape(output.shape, output.dim_order)[1:3])
52+
input_size_yx = tuple([inputs[0].shape[dim] for dim in inputs[0].dim_order])[
53+
1:3
54+
]
55+
output_size_yx = tuple([output.shape[dim] for dim in output.dim_order])[1:3]
5856

57+
# Get align_corners value from the node arguments.
58+
align_corners = bool(node.args[2])
5959
scale_n_yx, scale_d_yx, offset_yx, border_yx = get_resize_parameters(
60-
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=True
60+
input_size_yx,
61+
output_size_yx,
62+
ResizeMode.NEAREST,
63+
align_corners=align_corners,
6164
)
6265

6366
def in_int16_range(x):
@@ -139,15 +142,18 @@ def define_node(
139142
input_dtype = inputs[0].dtype
140143

141144
# tosa_shape output is NHWC, take HW
142-
input_size_yx = torch.tensor(
143-
tosa_shape(inputs[0].shape, inputs[0].dim_order)[1:3]
144-
)
145-
# Ignore scale and size parameters, directly use the output size as
146-
# we only support static shapes currently
147-
output_size_yx = torch.tensor(tosa_shape(output.shape, output.dim_order)[1:3])
145+
input_size_yx = tuple([inputs[0].shape[dim] for dim in inputs[0].dim_order])[
146+
1:3
147+
]
148+
output_size_yx = tuple([output.shape[dim] for dim in output.dim_order])[1:3]
148149

150+
# Get align_corners value from the node arguments.
151+
align_corners = bool(node.args[2])
149152
scale_n_yx, scale_d_yx, offset_yx, border_yx = get_resize_parameters(
150-
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=True
153+
input_size_yx,
154+
output_size_yx,
155+
ResizeMode.NEAREST,
156+
align_corners=align_corners,
151157
)
152158

153159
def in_int16_range(x):

backends/arm/operators/op_upsample_nearest2d.py

Lines changed: 15 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
validate_same_dtype,
1818
)
1919
from executorch.backends.arm.tosa_mapping import TosaArg
20-
from executorch.backends.arm.tosa_utils import get_resize_parameters, tosa_shape
20+
from executorch.backends.arm.tosa_utils import get_resize_parameters
2121

2222
from tosa_tools.v0_80.tosa.ResizeMode import ResizeMode # type: ignore
2323

@@ -43,19 +43,16 @@ def define_node(
4343
validate_num_inputs(self.target, inputs, 3)
4444
validate_same_dtype(self.target, [inputs[0], output])
4545

46-
if inputs[0].shape is None or output.shape is None:
47-
raise ValueError("Only static shapes are supported")
48-
4946
# tosa_shape output is NHWC, take HW
50-
input_size_yx = torch.tensor(
51-
tosa_shape(inputs[0].shape, inputs[0].dim_order)[1:3]
52-
)
53-
# Ignore scale and size parameters, directly use the output size as
54-
# we only support static shapes currently
55-
output_size_yx = torch.tensor(tosa_shape(output.shape, output.dim_order)[1:3])
47+
input_size_yx = tuple([inputs[0].shape[dim] for dim in inputs[0].dim_order])[
48+
1:3
49+
]
50+
output_size_yx = tuple([output.shape[dim] for dim in output.dim_order])[1:3]
5651

52+
# Align corners shouldn't make a difference for nearest upsampling. We set to False so
53+
# half pixel centers are used for resize parameter logic.
5754
scale_n_yx, scale_d_yx, offset_yx, border_yx = get_resize_parameters(
58-
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=True
55+
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=False
5956
)
6057

6158
def in_int16_range(x):
@@ -102,19 +99,16 @@ def define_node(
10299
validate_num_inputs(self.target, inputs, 3)
103100
validate_same_dtype(self.target, [inputs[0], output])
104101

105-
if inputs[0].shape is None or output.shape is None:
106-
raise ValueError("Only static shapes are supported")
107-
108102
# tosa_shape output is NHWC, take HW
109-
input_size_yx = torch.tensor(
110-
tosa_shape(inputs[0].shape, inputs[0].dim_order)[1:3]
111-
)
112-
# Ignore scale and size parameters, directly use the output size as
113-
# we only support static shapes currently
114-
output_size_yx = torch.tensor(tosa_shape(output.shape, output.dim_order)[1:3])
103+
input_size_yx = tuple([inputs[0].shape[dim] for dim in inputs[0].dim_order])[
104+
1:3
105+
]
106+
output_size_yx = tuple([output.shape[dim] for dim in output.dim_order])[1:3]
115107

108+
# Align corners shouldn't make a difference for nearest upsampling. We set to False so
109+
# half pixel centers are used for resize parameter logic.
116110
scale_n_yx, scale_d_yx, offset_yx, border_yx = get_resize_parameters(
117-
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=True
111+
input_size_yx, output_size_yx, ResizeMode.NEAREST, align_corners=False
118112
)
119113

120114
def in_int16_range(x):

backends/arm/test/ops/test_upsample_nearest2d.py

Lines changed: 167 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,17 @@
4040
"rand_one_and_half_size": lambda: (torch.rand(2, 4, 8, 3), (12, 4), None, False),
4141
}
4242

43+
test_data_suite_dynamic = {
44+
# (test_name, test_data, size, scale_factor, compare_outputs)
45+
"rand_double_scale": lambda: (torch.rand(2, 4, 8, 3), None, 2.0, False),
46+
"rand_double_scale_one_dim": lambda: (
47+
torch.rand(2, 4, 8, 3),
48+
None,
49+
(1.0, 2.0),
50+
False,
51+
),
52+
}
53+
4354

4455
class UpsamplingNearest2d(torch.nn.Module):
4556
def __init__(
@@ -161,3 +172,159 @@ def test_upsample_nearest2d_vec_tosa_BI_nearest(test_data: torch.Tensor):
161172
pipeline.pop_stage(-1)
162173

163174
pipeline.run()
175+
176+
177+
@common.parametrize("test_data", test_data_suite_dynamic)
178+
def test_upsample_nearest2d_dynamic_MI_nearest(test_data: torch.Tensor):
179+
test_data, size, scale_factor, compare_outputs = test_data()
180+
181+
batch_size = torch.export.Dim("batch", min=0, max=1000)
182+
input_height = torch.export.Dim("input_height", min=0, max=1000)
183+
input_width = torch.export.Dim("input_width", min=0, max=1000)
184+
185+
dynamic_shapes = {"x": {0: batch_size, 2: input_height, 3: input_width}}
186+
187+
pipeline = TosaPipelineMI[input_t1](
188+
UpsamplingNearest2d(size, scale_factor),
189+
(test_data,),
190+
aten_op,
191+
exir_op=[],
192+
dynamic_shapes=dynamic_shapes,
193+
)
194+
if not compare_outputs:
195+
pipeline.pop_stage(-1)
196+
pipeline.run()
197+
198+
199+
@common.parametrize("test_data", test_data_suite_dynamic)
200+
def test_upsample_nearest2d_dynamic_BI_nearest(test_data: torch.Tensor):
201+
test_data, size, scale_factor, compare_outputs = test_data()
202+
203+
batch_size = torch.export.Dim("batch", min=0, max=2)
204+
input_height = torch.export.Dim("input_height", min=0, max=8)
205+
input_width = torch.export.Dim("input_width", min=0, max=8)
206+
207+
dynamic_shapes = {"x": {0: batch_size, 2: input_height, 3: input_width}}
208+
209+
pipeline = TosaPipelineBI[input_t1](
210+
UpsamplingNearest2d(size, scale_factor),
211+
(test_data,),
212+
aten_op,
213+
exir_op=[],
214+
dynamic_shapes=dynamic_shapes,
215+
)
216+
if not compare_outputs:
217+
pipeline.pop_stage(-1)
218+
pipeline.run()
219+
220+
221+
@common.parametrize("test_data", test_data_suite_dynamic)
222+
def test_upsample_nearest2d_dynamic_MI_interpolate(test_data: torch.Tensor):
223+
test_data, size, scale_factor, compare_outputs = test_data()
224+
225+
batch_size = torch.export.Dim("batch", min=0, max=2)
226+
input_height = torch.export.Dim("input_height", min=4, max=8)
227+
input_width = torch.export.Dim("input_width", min=3, max=8)
228+
229+
dynamic_shapes = {
230+
"x": {
231+
0: batch_size,
232+
2: input_height,
233+
3: input_width,
234+
}
235+
}
236+
237+
pipeline = TosaPipelineMI[input_t1](
238+
Interpolate(size, scale_factor),
239+
(test_data,),
240+
aten_op,
241+
exir_op=[],
242+
dynamic_shapes=dynamic_shapes,
243+
)
244+
if not compare_outputs:
245+
pipeline.pop_stage(-1)
246+
pipeline.run()
247+
248+
249+
@common.parametrize("test_data", test_data_suite_dynamic)
250+
def test_upsample_nearest2d_dynamic_BI_interpolate(test_data: torch.Tensor):
251+
test_data, size, scale_factor, compare_outputs = test_data()
252+
253+
batch_size = torch.export.Dim("batch", min=0, max=2)
254+
input_height = torch.export.Dim("input_height", min=4, max=8)
255+
input_width = torch.export.Dim("input_width", min=3, max=8)
256+
257+
dynamic_shapes = {
258+
"x": {
259+
0: batch_size,
260+
2: input_height,
261+
3: input_width,
262+
}
263+
}
264+
265+
pipeline = TosaPipelineBI[input_t1](
266+
Interpolate(size, scale_factor),
267+
(test_data,),
268+
aten_op,
269+
exir_op=[],
270+
dynamic_shapes=dynamic_shapes,
271+
)
272+
if not compare_outputs:
273+
pipeline.pop_stage(-1)
274+
pipeline.run()
275+
276+
277+
@common.parametrize("test_data", test_data_suite_dynamic)
278+
def test_upsample_nearest2d_dynamic_MI_upsample(test_data: torch.Tensor):
279+
test_data, size, scale_factor, compare_outputs = test_data()
280+
281+
batch_size = torch.export.Dim("batch", min=0, max=1000)
282+
input_height = torch.export.Dim("input_height", min=0, max=1000)
283+
input_width = torch.export.Dim("input_width", min=0, max=1000)
284+
285+
dynamic_shapes = {
286+
"x": {
287+
0: batch_size,
288+
2: input_height,
289+
3: input_width,
290+
}
291+
}
292+
293+
pipeline = TosaPipelineMI[input_t1](
294+
Upsample(size, scale_factor),
295+
(test_data,),
296+
aten_op,
297+
exir_op=[],
298+
dynamic_shapes=dynamic_shapes,
299+
)
300+
if not compare_outputs:
301+
pipeline.pop_stage(-1)
302+
pipeline.run()
303+
304+
305+
@common.parametrize("test_data", test_data_suite_dynamic)
306+
def test_upsample_nearest2d_dynamic_BI_upsample(test_data: torch.Tensor):
307+
test_data, size, scale_factor, compare_outputs = test_data()
308+
309+
batch_size = torch.export.Dim("batch", min=0, max=2)
310+
input_height = torch.export.Dim("input_height", min=0, max=8)
311+
input_width = torch.export.Dim("input_width", min=0, max=8)
312+
313+
dynamic_shapes = {
314+
"x": {
315+
0: batch_size,
316+
2: input_height,
317+
3: input_width,
318+
}
319+
}
320+
321+
pipeline = TosaPipelineBI[input_t1](
322+
Upsample(size, scale_factor),
323+
(test_data,),
324+
aten_op,
325+
exir_op=[],
326+
dynamic_shapes=dynamic_shapes,
327+
)
328+
if not compare_outputs:
329+
pipeline.pop_stage(-1)
330+
pipeline.run()

0 commit comments

Comments
 (0)