
end2end test for bundled program #395


Closed
wants to merge 3 commits
45 changes: 27 additions & 18 deletions bundled_program/config.py
@@ -47,6 +47,7 @@ class ConfigIOSet:
class ConfigExecutionPlanTest:
"""All info related to verify execution plan"""

method_name: str
test_sets: List[ConfigIOSet]


@@ -60,6 +61,7 @@ class BundledConfig:

def __init__(
self,
method_names: List[str],
# pyre-ignore
inputs: List[List[Any]],
# pyre-ignore
@@ -68,34 +70,34 @@ def __init__(
"""Contruct the config given inputs and expected outputs

Args:
inputs: All sets of input need to be test on for all execution plans. Each list
of `inputs` is all sets which will be run on the execution plan in the
program sharing same index. Each set of any `inputs` element should
method_names: All method names that need to be verified in the program.
inputs: All sets of input to be tested on for all methods. Each element
of `inputs` is the list of test sets that will be run on the method in the
program with the corresponding method name. Each test set should
contain all inputs required by eager_model with the same inference function
as the corresponding execution plan, for one-time execution.

Please note that currently we do not have any consensus about the mapping rule
between inference name in eager_model and execution plan id in executorch
program. Hence, user should take care of the data order in `inputs`: each list
of `inputs` is all sets which will be run on the execution plan with same index,
not the inference function with same index in the result of get_inference_name.
Same as the `expected_outputs` and `metadatas` below.

It shouldn't be a problem if there's only one inferenece function per model.

expected_outputs: Expected outputs for inputs sharing same index. The size of
expected_outputs should be the same as the size of inputs.
expected_outputs should be the same as the size of inputs and provided method_names.
"""
BundledConfig._check_io_type(inputs)
BundledConfig._check_io_type(expected_outputs)
assert len(inputs) == len(expected_outputs), (
"length of inputs and expected_outputs should match,"
+ " but got {} and {}".format(len(inputs), len(expected_outputs))

for m_name in method_names:
assert isinstance(m_name, str)

assert len(method_names) == len(inputs) == len(expected_outputs), (
"length of method_names, inputs and expected_outputs should match,"
+ " but got {}, {} and {}".format(
len(method_names), len(inputs), len(expected_outputs)
)
)

self.execution_plan_tests: List[
ConfigExecutionPlanTest
] = BundledConfig._gen_execution_plan_tests(inputs, expected_outputs)
] = BundledConfig._gen_execution_plan_tests(
method_names, inputs, expected_outputs
)

@staticmethod
# TODO(T138930448): Give pyre-ignore commands appropriate warning type and comments.
@@ -145,6 +147,7 @@ def _check_io_type(test_data_program: List[List[Any]]) -> None:

@staticmethod
def _gen_execution_plan_tests(
method_names: List[str],
# pyre-ignore
inputs: List[List[Any]],
# pyre-ignore
@@ -155,9 +158,10 @@ def _gen_execution_plan_tests(
execution_plan_tests: List[ConfigExecutionPlanTest] = []

for (
m_name,
inputs_per_plan_test,
expect_outputs_per_plan_test,
) in zip(inputs, expected_outputs):
) in zip(method_names, inputs, expected_outputs):
test_sets: List[ConfigIOSet] = []

# transfer I/O sets into ConfigIOSet for each execution plan
@@ -182,7 +186,12 @@ def _gen_execution_plan_tests(

execution_plan_tests.append(
ConfigExecutionPlanTest(
method_name=m_name,
test_sets=test_sets,
)
)

# sort the execution plan tests by method name to align with the core program emitter.
execution_plan_tests.sort(key=lambda x: x.method_name)

return execution_plan_tests
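
For reference, a minimal sketch of how a BundledConfig could be constructed after this change, following the constructor signature and docstring above; the SampleModel module, tensor shapes, and number of test sets are hypothetical.

import torch
from executorch.bundled_program.config import BundledConfig

# Hypothetical eager model with a single "forward" method.
class SampleModel(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1

model = SampleModel()

# One entry per method name; each entry is a list of test sets, and each
# test set lists every positional input the method takes.
method_names = ["forward"]
inputs = [[[torch.ones(2, 2)], [torch.zeros(2, 2)]]]
expected_outputs = [[[model(*test_set)] for test_set in inputs[0]]]

bundled_config = BundledConfig(method_names, inputs, expected_outputs)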
78 changes: 53 additions & 25 deletions bundled_program/core.py
@@ -136,32 +136,56 @@ def assert_valid_bundle(

"""

# Check the number of execution plan tests
assert len(bundled_config.execution_plan_tests) == len(
program.execution_plan
), "The length of execution_plan_tests in config should match the length of execution_plan in program, but get {} and {}.".format(
len(bundled_config.execution_plan_tests), len(program.execution_plan)
)
program_plan_id = 0
bp_plan_id = 0

method_name_of_program = {e.name for e in program.execution_plan}
method_name_of_test_suites = {
t.method_name for t in bundled_config.execution_plan_tests
}

assert method_name_of_test_suites.issubset(
method_name_of_program
), f"All methods in method_test_suites should be found in program.execution_plan, \
but {str(method_name_of_test_suites - method_name_of_program)} does not include."

# check if method_test_suites has been sorted in ascending alphabetical order of method name.
for bp_plan_id in range(1, len(bundled_config.execution_plan_tests)):
assert (
bundled_config.execution_plan_tests[bp_plan_id - 1].method_name
<= bundled_config.execution_plan_tests[bp_plan_id].method_name
), f"The method name of test suite should be sorted in ascending alphabetical \
order of method name, but {bp_plan_id-1}-th and {bp_plan_id}-th method_test_suite aren't."

# Check if the inputs' type meet Program's requirement
for plan_id in range(len(program.execution_plan)):
while bp_plan_id < len(bundled_config.execution_plan_tests):

plan_test: ConfigExecutionPlanTest = bundled_config.execution_plan_tests[
plan_id
bp_plan_id
]
plan: ExecutionPlan = program.execution_plan[program_plan_id]

plan: ExecutionPlan = program.execution_plan[plan_id]
# The user did not provide testcases for the current plan, skip it
if plan_test.method_name > plan.name:
program_plan_id += 1
continue

# Check if the method name in user provided test matches the one in the original program
assert (
plan_test.method_name == plan.name
), f"BundledConfig has testcases for method {plan_test.method_name}, but can not find it in the given program. All method names in the program are {', '.join([p.name for p in program.execution_plan])}."

# Check if the type of Program's input is supported
for index in range(len(plan.inputs)):
assert (
type(get_program_input(program, plan_id, index))
type(get_program_input(program, program_plan_id, index))
in supported_program_type_table
), "The type of program's input isn't supported."

# Check if the type of Program's output is supported
for index in range(len(plan.outputs)):
assert (
type(get_program_output(program, plan_id, index)) == Tensor
type(get_program_output(program, program_plan_id, index)) == Tensor
), "Only supports program with output in Tensor type."

# Check if the I/O sets of each execution plan test match program's requirement.
@@ -181,14 +205,14 @@ def assert_valid_bundle(
assert (
type(cur_plan_test_inputs[j])
== supported_program_type_table[
type(get_program_input(program, plan_id, j))
type(get_program_input(program, program_plan_id, j))
]
), "The type {}-th input in {}-th test set of {}-th execution plan does not meet Program's requirement: expected {} but get {}".format(
j,
i,
plan_id,
program_plan_id,
supported_program_type_table[
type(get_program_input(program, plan_id, j))
type(get_program_input(program, program_plan_id, j))
],
type(cur_plan_test_inputs[j]),
)
@@ -198,10 +222,10 @@ def assert_valid_bundle(
# pyre-fixme[16]: Undefined attribute [16]: Item `bool` of `typing.Union[bool, float, int, torch._tensor.Tensor]`
# has no attribute `dtype`.
assert cur_plan_test_inputs[j].dtype == get_input_dtype(
program, plan_id, j
program, program_plan_id, j
), "The input tensor {} dtype shall be {}, but now is {}".format(
cur_plan_test_inputs[j],
get_input_dtype(program, plan_id, j),
get_input_dtype(program, program_plan_id, j),
cur_plan_test_inputs[j].dtype,
)
elif type(cur_plan_test_inputs[j]) in (
@@ -210,9 +234,9 @@ def assert_valid_bundle(
float,
):
assert type(cur_plan_test_inputs[j]) == get_input_type(
program, plan_id, j
program, program_plan_id, j
), "The input primitive dtype shall be {}, but now is {}".format(
get_input_type(program, plan_id, j),
get_input_type(program, program_plan_id, j),
type(cur_plan_test_inputs[j]),
)

@@ -221,13 +245,16 @@ def assert_valid_bundle(
# pyre-fixme[16]: Undefined attribute [16]: Item `bool` of `typing.Union[bool, float, int, torch._tensor.Tensor]`
# has no attribute `dtype`.
assert cur_plan_test_expected_outputs[j].dtype == get_output_dtype(
program, plan_id, j
program, program_plan_id, j
), "The label tensor {} dtype shall be {}, but now is {}".format(
cur_plan_test_expected_outputs[j],
get_output_dtype(program, plan_id, j),
get_output_dtype(program, program_plan_id, j),
cur_plan_test_expected_outputs[j].dtype,
)

program_plan_id += 1
bp_plan_id += 1


def create_bundled_program(
program: Program,
@@ -245,10 +272,7 @@ def create_bundled_program(
execution_plan_tests: List[BundledExecutionPlanTest] = []

# Emit data and metadata of bundled tensor
for plan_id in range(len(program.execution_plan)):
plan_test: ConfigExecutionPlanTest = bundled_config.execution_plan_tests[
plan_id
]
for plan_test in bundled_config.execution_plan_tests:
test_sets: List[BundledIOSet] = []

# emit I/O sets for each execution plan test
@@ -283,7 +307,11 @@ def create_bundled_program(
)

# emit the whole execution plan test
execution_plan_tests.append(BundledExecutionPlanTest(test_sets=test_sets))
execution_plan_tests.append(
BundledExecutionPlanTest(
method_name=plan_test.method_name, test_sets=test_sets
)
)

program_bytes: bytes = _serialize_pte_binary(program)

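The matching logic in assert_valid_bundle above relies on both the test suites (sorted in _gen_execution_plan_tests) and program.execution_plan being ordered by method name, and walks them with two cursors so that plans without user-provided testcases are simply skipped. A standalone sketch of that walk over plain name lists (the names are made up, and the per-plan I/O checks are elided):

# Plans come from the program, suites from the BundledConfig; both are sorted.
plan_names = ["decode", "encode", "forward"]
suite_names = ["decode", "forward"]  # "encode" has no testcases and is skipped

program_plan_id = 0
bp_plan_id = 0
while bp_plan_id < len(suite_names):
    if suite_names[bp_plan_id] > plan_names[program_plan_id]:
        # No testcases for this plan, move on to the next plan.
        program_plan_id += 1
        continue
    # The earlier subset assertion guarantees every suite name appears here.
    assert suite_names[bp_plan_id] == plan_names[program_plan_id]
    # ... per-plan input/output type and dtype checks go here ...
    program_plan_id += 1
    bp_plan_id += 1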
5 changes: 5 additions & 0 deletions bundled_program/schema.py
@@ -73,6 +73,11 @@ class BundledIOSet:
class BundledExecutionPlanTest:
"""Context for testing and verifying an exceution plan."""

# The name of the method to test; e.g., "forward" for the forward() method
# of an nn.Module. This name must match a method defined by the ExecuTorch
# program.
method_name: str

# Sets of input/outputs to test with.
test_sets: List[BundledIOSet]

32 changes: 32 additions & 0 deletions bundled_program/tests/TARGETS
@@ -1,3 +1,5 @@
# @noautodeps

load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest")

@@ -58,3 +60,33 @@ python_unittest(
"//executorch/extension/pytree:pylib",
],
)

python_unittest(
name = "end2end",
srcs = [
"test_end2end.py",
],
deps = [
":lib",
"//caffe2:torch",
"//executorch/bundled_program:config",
"//executorch/bundled_program:core",
"//executorch/bundled_program/serialize:lib",
"//executorch/exir:dynamic_shape",
"//executorch/exir:lib",
"//executorch/exir:memory",
"//executorch/exir:pass_manager",
"//executorch/exir:print_program",
"//executorch/exir:tensor",
"//executorch/exir/_serialize:lib",
"//executorch/exir/emit:lib",
"//executorch/exir/passes:lib",
"//executorch/exir/tests:control_flow_models",
"//executorch/exir/tests:dynamic_shape_models",
"//executorch/exir/tests:models",
"//executorch/exir/tests:transformer",
"//executorch/extension/pybindings:portable_lib",
"//executorch/extension/pytree:pybindings",
"//executorch/kernels/portable:custom_ops_generated_lib",
],
)
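
Assuming the usual Buck workflow for this repo, the new target should be runnable with something along the lines of buck test //executorch/bundled_program/tests:end2end (or buck2 test, depending on the local setup).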