Skip to content

Commit d5dba0f

Browse files
committed
Pull request pytorch#82: Raise an exception when there is an incorrect number of NeutronGraph nodes in the model
Merge in AITEC/executorch from hotfix/nxf93343/raise-error-when-incorrect-number-of-neutron-graphs to main-nxp * commit 'ee733a1d5713eda8341941113009c7609c2a7a9a': [NO-UPSTREAM] Update/remove failing/non-relevant tests Raise exception when there is incorrect number of NeutronGraph nodes in model
2 parents 558a957 + ee733a1 commit d5dba0f

File tree

5 files changed

+21
-69
lines changed

5 files changed

+21
-69
lines changed

backends/nxp/neutron_node_extraction.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,12 +36,17 @@ def extract_artifacts_from_neutron_node(tflite_flatbuffer_or_path: bytes | str)
3636

3737
sub_graph = model.Subgraphs(0)
3838

39-
if sub_graph.OperatorsLength() != 1:
40-
logging.warning(f'Model has `{sub_graph.OperatorsLength()}` Operators instead of `1`.')
39+
if sub_graph.OperatorsLength() == 0:
40+
raise RuntimeError(f'Model converted with neutron-converter has `0` operators instead of `1`.')
41+
elif sub_graph.OperatorsLength() > 1:
42+
builtin_operators_map: dict[int, str] = {y: x for x, y in BuiltinOperator.__dict__.items()}
4143

42-
# TODO Raise an exception in the future, because the graph should only contain the 1 node. Multiple nodes
43-
# indicate an issue with the Partitioner.
44-
# raise RuntimeError(f'Model has `{sub_graph.OperatorsLength()}` Operators instead of `1`.')
44+
opcodes = [model.OperatorCodes(i) for i in range(model.OperatorCodesLength())]
45+
nodes = [sub_graph.Operators(i) for i in range(sub_graph.OperatorsLength())]
46+
ops_found = [builtin_operators_map[opcodes[node.OpcodeIndex()].BuiltinCode()] for node in nodes]
47+
48+
raise RuntimeError(f'Model converted with neutron-converter has `{sub_graph.OperatorsLength()}` operators '
49+
f'instead of `1`. Operators found: {", ".join(ops_found)}.')
4550

4651
neutron_node = None
4752
opcodes = [model.OperatorCodes(i) for i in range(model.OperatorCodesLength())]
@@ -52,7 +57,8 @@ def extract_artifacts_from_neutron_node(tflite_flatbuffer_or_path: bytes | str)
5257
neutron_node = sub_graph.Operators(i)
5358
break
5459

55-
assert neutron_node is not None, 'The provided model does not contain a Neutron Node.'
60+
if neutron_node is None:
61+
raise RuntimeError('Model converted with neutron-converter does not contain a NeutronGraph node.')
5662

5763
# The last 3 input tensors of the Neutron Node contain:
5864
# 1. Neutron Microcode

backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py

Lines changed: 0 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -27,28 +27,6 @@ def test_constant_pad_nd_conversion__specific_constant(constant):
2727
convert_run_compare(edge_program, input_data)
2828

2929

30-
@pytest.mark.parametrize("constant", [0.0, 67.28, 42., -13.37])
31-
def test_constant_pad_nd_quant_conversion__specific_constant(mocker, constant):
32-
input_shape = (2, 4, 12, 12)
33-
paddings = (2, 2, 2, 2)
34-
35-
converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
36-
37-
# Run conversion
38-
_ = to_quantized_edge_program(Conv2dConstantPadNDModule(paddings, constant), input_shape)
39-
40-
# Capture generated model
41-
tflite_flatbuffers_model, io_formats = converter_spy.spy_return
42-
43-
# Capture converted program
44-
edge_program: ExportedProgram = converter_spy.call_args.args[1]
45-
46-
input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8)
47-
48-
convert_run_compare(edge_program, input_data, tfl_model=tflite_flatbuffers_model, atol=1.,
49-
tflite_input_preprocess=ToNHWCPreprocess(), tflite_output_preprocess=ToNCHWPreprocess())
50-
51-
5230
def test_constant_pad_nd_conversion__default_constant():
5331
input_shape = [2, 4, 6, 8]
5432
paddings = [1, 2, 3, 4]

backends/nxp/tests/ir/converter/node_converter/test_conv_converter.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -119,12 +119,8 @@ def test_conv2d_conversion__depthwise(input_shape, stride, dilation, kernel_shap
119119
@pytest.mark.parametrize("stride", [1, 2])
120120
@pytest.mark.parametrize("dilation", [1, 2])
121121
@pytest.mark.parametrize("kernel_shape", [[1, 2], [3, 3], [4, 1]])
122-
@pytest.mark.parametrize("input_shape", [
123-
[1, 4, 12, 12],
124-
[2, 3, 10, 15],
125-
[11, 10, 9, 8],
126-
], ids=lambda x: f'Input shape = {x}, groups = {x[1]}')
127-
def test_conv2d_conversion__depthwise__quantized(input_shape, stride, dilation, kernel_shape, mocker):
122+
def test_conv2d_conversion__depthwise__quantized(stride, dilation, kernel_shape, mocker):
123+
input_shape = [1, 4, 12, 12]
128124
group = input_shape[1]
129125
spy = mocker.spy(ModelBuilder, 'finish')
130126

@@ -171,12 +167,8 @@ def test_conv2d_conversion__depthwise__padded(input_shape, padding, mocker):
171167

172168

173169
@pytest.mark.parametrize("padding", [1, 2])
174-
@pytest.mark.parametrize("input_shape", [
175-
[1, 4, 12, 12],
176-
[2, 3, 4, 5],
177-
[11, 10, 9, 8],
178-
], ids=lambda x: f'Input shape = {x}, groups = {x[1]}')
179-
def test_conv2d_conversion__depthwise__padded__quantized(input_shape, padding, mocker):
170+
def test_conv2d_conversion__depthwise__padded__quantized(padding, mocker):
171+
input_shape = [1, 4, 12, 12]
180172
group = input_shape[1]
181173
spy = mocker.spy(ModelBuilder, 'finish')
182174

backends/nxp/tests/ir/converter/node_converter/test_view_copy_converter.py

Lines changed: 4 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -186,37 +186,13 @@ def test_view_copy_w_linear_quant_conversion(mocker, input_shape, new_shape):
186186
convert_run_compare(edge_program, input_data, tfl_model=tflite_flatbuffers_model, atol=1.)
187187

188188

189-
@pytest.mark.parametrize("input_shape, new_shape", [
190-
pytest.param((1, 4, 16, 16), (50, 18), id="4D, batch_size=1"),
191-
pytest.param((10, 4, 16, 16), (500, 18), id="4D, , batch_size=10"),
192-
])
193-
def test_view_copy_w_conv_quant_conversion(mocker, input_shape, new_shape):
194-
converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
195-
196-
# Run conversion
197-
_ = to_quantized_edge_program(ConvReshapeModule(channels=input_shape[1], new_shape=new_shape), input_shape)
198-
199-
# Capture generated model
200-
tflite_flatbuffers_model, io_formats = converter_spy.spy_return
201-
202-
# Capture converted program
203-
edge_program: ExportedProgram = converter_spy.call_args.args[1]
204-
205-
input_data = (np.random.random(input_shape).astype(np.float32) * 50).astype(np.int8)
206-
207-
convert_run_compare(edge_program, input_data, tflite_input_preprocess=ToNHWCPreprocess(),
208-
tfl_model=tflite_flatbuffers_model, atol=1.)
209-
210-
211-
@pytest.mark.parametrize("input_shape, channels_view_out", [
212-
pytest.param((1, 4, 16, 16), 196, id="4D"),
213-
])
214-
def test_view_w_conv_linear_quant_conversion(mocker, input_shape, channels_view_out):
189+
def test_view_w_conv_linear_quant_conversion(mocker):
190+
input_shape = (1, 8, 8, 8)
215191
converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
192+
model = ConvLinearViewModule(channels=input_shape[1], channels_view_out=72)
216193

217194
# Run conversion
218-
_ = to_quantized_edge_program(ConvLinearViewModule(channels=input_shape[1],
219-
channels_view_out=channels_view_out), input_shape)
195+
_ = to_quantized_edge_program(model, input_shape)
220196

221197
# Capture generated model
222198
tflite_flatbuffers_model, io_formats = converter_spy.spy_return

backends/nxp/tests/test_batch_norm_fusion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ def test_batch_norm_conv_fusing__full_pipeline__1d(bias: bool):
134134

135135
@pytest.mark.parametrize('bias', [True, False], ids=lambda x: 'Bias' if x else 'No bias')
136136
def test_batch_norm_conv_fusing__full_pipeline__2d(bias: bool):
137-
input_shape = [2, 4, 6, 8]
137+
input_shape = [1, 4, 6, 8]
138138
module = ConvBatchNormModule(bias, len(input_shape), 4)
139139

140140
edge_program = to_quantized_edge_program(module, tuple(input_shape)).exported_program()

0 commit comments

Comments (0)