Dynamic Shapes #2442

Closed · wants to merge 1 commit
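Every file touched by this diff makes the same substitution: the two-step `.run_method()` / `.compare_outputs()` pair on the XNNPACK test `Tester` is collapsed into a single `.run_method_and_compare_outputs()` call, with keyword arguments such as `atol` carried over (see llama2_et_example.py below). A minimal sketch of the updated pattern follows; the `Tester` import path, the toy module, and the stages ahead of `.partition()` are illustrative assumptions, not part of this diff.

```python
import torch

# Assumed import path for the XNNPACK test harness used throughout these files.
from executorch.backends.xnnpack.test.tester import Tester


class ToyAbs(torch.nn.Module):
    def forward(self, x):
        return torch.abs(x)


def test_fp32_toy_abs():
    inputs = (torch.randn(2, 3),)
    (
        Tester(ToyAbs(), inputs)
        .export()
        .to_edge()
        .partition()
        .to_executorch()
        .serialize()
        # One call now both runs the serialized program on the runtime and
        # compares its outputs against eager PyTorch, replacing the former
        # .run_method().compare_outputs() pair.
        .run_method_and_compare_outputs()
    )
```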
3 changes: 1 addition & 2 deletions backends/xnnpack/test/models/deeplab_v3.py
@@ -36,6 +36,5 @@ def test_fp32_dl3(self):
.partition()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/edsr.py
@@ -25,8 +25,7 @@ def test_fp32_edsr(self):
.partition()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_edsr(self):
@@ -38,6 +37,5 @@ def test_qs8_edsr(self):
.partition()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
28 changes: 12 additions & 16 deletions backends/xnnpack/test/models/emformer_rnnt.py
@@ -21,8 +21,8 @@ def __init__(self):
self.rnnt = decoder.model

class Joiner(EmformerRnnt):
- def forward(self, predict_inputs):
- return self.rnnt.join(*predict_inputs)
+ def forward(self, a, b, c, d):
+ return self.rnnt.join(a, b, c, d)

def get_example_inputs(self):
join_inputs = (
@@ -31,7 +31,7 @@ def get_example_inputs(self):
torch.rand([1, 128, 1024]),
torch.tensor([128]),
)
- return (join_inputs,)
+ return join_inputs

def test_fp32_emformer_joiner(self):
joiner = self.Joiner()
@@ -43,21 +43,19 @@ def test_fp32_emformer_joiner(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

class Predictor(EmformerRnnt):
- def forward(self, predict_inputs):
- return self.rnnt.predict(*predict_inputs)
+ def forward(self, a, b):
+ return self.rnnt.predict(a, b, None)

def get_example_inputs(self):
predict_inputs = (
torch.zeros([1, 128], dtype=int),
torch.tensor([128], dtype=int),
- None,
)
- return (predict_inputs,)
+ return predict_inputs

@unittest.skip("T183426271")
def test_fp32_emformer_predictor(self):
@@ -70,20 +68,19 @@ def test_fp32_emformer_predictor(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

class Transcriber(EmformerRnnt):
- def forward(self, predict_inputs):
- return self.rnnt.transcribe(*predict_inputs)
+ def forward(self, a, b):
+ return self.rnnt.transcribe(a, b)

def get_example_inputs(self):
transcribe_inputs = (
torch.randn(1, 128, 80),
torch.tensor([128]),
)
- return (transcribe_inputs,)
+ return transcribe_inputs

def test_fp32_emformer_transcriber(self):
transcriber = self.Transcriber()
@@ -95,6 +92,5 @@ def test_fp32_emformer_transcriber(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
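Besides the tester-call rename, the emformer_rnnt.py hunks above flatten the wrapper signatures: each `forward` now takes its tensors as separate positional arguments (with the Predictor's trailing `None` supplied inside `forward`), and `get_example_inputs` returns the input tuple directly rather than wrapping it in a one-element tuple. A hedged sketch of the `Joiner` variant is below; the base-class body and the first two example tensors sit outside the visible hunks and are assumptions.

```python
import torch
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH  # assumed source of the model


class EmformerRnnt(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Assumed: the diff only shows the `self.rnnt = decoder.model` line.
        decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder()
        self.rnnt = decoder.model


class Joiner(EmformerRnnt):
    # After the change: four positional tensors instead of one packed tuple,
    # matching how the example inputs are unpacked into forward() as *args.
    def forward(self, a, b, c, d):
        return self.rnnt.join(a, b, c, d)

    def get_example_inputs(self):
        return (
            torch.rand([1, 128, 1024]),  # assumed: hidden above the visible hunk
            torch.tensor([128]),         # assumed: hidden above the visible hunk
            torch.rand([1, 128, 1024]),
            torch.tensor([128]),
        )
```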
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/inception_v3.py
@@ -42,8 +42,7 @@ def test_fp32_ic3(self):
.check_not(list(self.all_operators))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_ic3(self):
@@ -63,6 +62,5 @@ def test_qs8_ic3(self):
.check_not(list(ops_after_quantization))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/inception_v4.py
@@ -39,8 +39,7 @@ def test_fp32_ic4(self):
.check_not(list(self.all_operators))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_ic4(self):
@@ -60,6 +59,5 @@ def test_qs8_ic4(self):
.check_not(list(ops_after_quantization))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/models/llama2_et_example.py
@@ -45,6 +45,5 @@ def _test(self, dtype: torch.dtype = torch.float):
.dump_artifact()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs(atol=5e-2)
+ .run_method_and_compare_outputs(atol=5e-2)
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/models/mobilebert.py
@@ -38,6 +38,5 @@ def test_fp32_mobilebert(self):
.check_not(list(self.supported_ops))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/mobilenet_v2.py
@@ -40,8 +40,7 @@ def test_fp32_mv2(self):
.check_not(list(self.all_operators))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_mv2(self):
@@ -61,6 +60,5 @@ def test_qs8_mv2(self):
.check_not(list(ops_after_quantization))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/mobilenet_v3.py
@@ -42,8 +42,7 @@ def test_fp32_mv3(self):
.check_not(list(self.all_operators))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_mv3(self):
@@ -63,6 +62,5 @@ def test_qs8_mv3(self):
.check_not(list(ops_after_lowering))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/resnet.py
@@ -23,8 +23,7 @@ def test_fp32_resnet18(self):
.partition()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_resnet18(self):
@@ -37,6 +36,5 @@ def test_qs8_resnet18(self):
.partition()
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/models/torchvision_vit.py
@@ -57,6 +57,5 @@ def test_fp32_vit(self):
.check_not(list(lowerable_xnn_operators))
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/models/very_big_model.py
@@ -39,6 +39,5 @@ def test_very_big_model(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
6 changes: 2 additions & 4 deletions backends/xnnpack/test/models/w2l.py
@@ -34,8 +34,7 @@ def test_fp32_w2l(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_w2l(self):
@@ -54,6 +53,5 @@ def test_qs8_w2l(self):
.check(["torch.ops.higher_order.executorch_call_delegate"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/ops/abs.py
@@ -31,8 +31,7 @@ def _test_abs(self, inputs):
.check_not(["executorch_exir_dialects_edge__ops_aten_abs_default"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_fp16_abs(self):
24 changes: 8 additions & 16 deletions backends/xnnpack/test/ops/add.py
@@ -54,8 +54,7 @@ def _test_add(self, inputs):
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_fp16_add(self):
@@ -79,8 +78,7 @@ def test_fp32_add_constant(self):
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_add_constant(self):
@@ -121,8 +119,7 @@ def test_qs8_add(self):
)
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_add2(self):
@@ -145,8 +142,7 @@ def test_qs8_add2(self):
)
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_add3(self):
@@ -169,8 +165,7 @@ def test_qs8_add3(self):
)
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

class AddRelu(torch.nn.Module):
@@ -194,8 +189,7 @@ def test_fp32_add_relu(self):
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_add_relu(self):
@@ -214,8 +208,7 @@ def test_qs8_add_relu(self):
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_qs8_add_relu_seq(self):
@@ -261,6 +254,5 @@ def forward(self, x, z):
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)
3 changes: 1 addition & 2 deletions backends/xnnpack/test/ops/avgpool2d.py
@@ -42,8 +42,7 @@ def _test_argpool2d(self, inputs):
.check_not(["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"])
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_fp16_avgpool2d(self):
6 changes: 2 additions & 4 deletions backends/xnnpack/test/ops/bilinear2d.py
@@ -87,8 +87,7 @@ def test_fp32_static_resize_bilinear2d(self):
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_fp32_static_resize_bilinear2d_with_align_cornesr(self):
@@ -103,8 +102,7 @@ def test_fp32_static_resize_bilinear2d_with_align_cornesr(self):
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.serialize()
- .run_method()
- .compare_outputs()
+ .run_method_and_compare_outputs()
)

def test_fp32_static_resize_bilinear2d_antialiased(self):