
Commit 07f79c2

Pop-korn authored and StrycekSimon committed
NXP backend: Prohibit delegation of convolution with batch size != 1.
Neutron only supports batch == 1.
1 parent cb91593 commit 07f79c2

File tree

5 files changed: 28 additions & 38 deletions

backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py

Lines changed: 4 additions & 0 deletions
@@ -84,6 +84,10 @@ def _is_supported_in_IR(
         if weight_tensor.dtype not in [torch.float32, torch.int8, torch.uint8]:
             return False
 
+        if node.args[0].meta["val"].shape[0] != 1:
+            # Only batch size 1 is supported on neutron.
+            return False
+
         return True
 
 
 Stride = Padding = Dilation = OutPadding = list[int]
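
In effect, the converter reads the batch dimension of the convolution's activation input from the exported-program node metadata and refuses delegation when it is not 1, so such convolutions stay on the non-delegated execution path. A minimal standalone sketch of the same check (the helper name and the torch.fx.Node annotation are assumptions for illustration; only the node.args[0].meta["val"] access comes from the change above):

import torch

def input_has_unit_batch(node: torch.fx.Node) -> bool:
    # Exported-program nodes carry a FakeTensor describing their value in
    # node.meta["val"]; args[0] is the convolution's activation input.
    input_meta = node.args[0].meta["val"]
    return input_meta.shape[0] == 1  # Neutron only supports batch == 1.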

backends/nxp/tests/ir/converter/node_converter/test_constant_pad_nd_converter.py

Lines changed: 4 additions & 6 deletions
@@ -124,12 +124,10 @@ def test_constant_pad_nd_conversion__format_less(input_shape, paddings):
 @pytest.mark.parametrize(
     "input_shape, paddings",
     [
-        pytest.param([2, 4, 6, 8], list(range(2)), id="4D, padding W"),
-        pytest.param([2, 4, 6, 8], list(range(4)), id="4D, padding H, W"),
-        pytest.param([2, 1, 6, 8], [1, 2, 3, 4, 2, 1], id="4D, padding C, H, W"),
-        pytest.param(
-            [2, 1, 6, 8], [1, 2, 3, 4, 2, 1, 5, 6], id="4D, padding N, C, H, W"
-        ),
+        pytest.param([1, 4, 6, 8], list(range(2)), id="4D, padding W"),
+        pytest.param([1, 4, 6, 8], list(range(4)), id="4D, padding H, W"),
+        pytest.param([1, 1, 6, 8], [1, 2, 3, 4, 2, 1], id="4D, padding C, H, W"),
+        # pytest.param([1, 1, 6, 8], [1, 2, 3, 4, 2, 1, 5, 6], id='4D, padding N, C, H, W'), # Batch size must stay 0.
     ],
 )
 def test_constant_pad_nd_conversion__channels_first(input_shape, paddings):

backends/nxp/tests/ir/converter/node_converter/test_conv_converter.py

Lines changed: 5 additions & 23 deletions
@@ -215,18 +215,8 @@ def test_conv2d_quant_conversion(mocker, model: torch.nn.Module, input_shape):
 @pytest.mark.parametrize("stride", [1, 2])
 @pytest.mark.parametrize("dilation", [1, 2])
 @pytest.mark.parametrize("kernel_shape", [[1, 2], [3, 3], [4, 1]])
-@pytest.mark.parametrize(
-    "input_shape",
-    [
-        [1, 4, 12, 12],
-        [2, 3, 10, 15],
-        [11, 10, 9, 8],
-    ],
-    ids=lambda x: f"Input shape = {x}, groups = {x[1]}",
-)
-def test_conv2d_conversion__depthwise(
-    input_shape, stride, dilation, kernel_shape, mocker
-):
+def test_conv2d_conversion__depthwise(stride, dilation, kernel_shape, mocker):
+    input_shape = [1, 3, 12, 16]
     group = input_shape[1]
     edge_program = to_edge_program(
         Conv2dModule(
@@ -292,16 +282,8 @@ def test_conv2d_conversion__depthwise__quantized(
 
 
 @pytest.mark.parametrize("padding", [1, 2])
-@pytest.mark.parametrize(
-    "input_shape",
-    [
-        [1, 4, 12, 12],
-        [2, 3, 4, 5],
-        [11, 10, 9, 8],
-    ],
-    ids=lambda x: f"Input shape = {x}, groups = {x[1]}",
-)
-def test_conv2d_conversion__depthwise__padded(input_shape, padding, mocker):
+def test_conv2d_conversion__depthwise__padded(padding, mocker):
+    input_shape = [1, 3, 13, 15]
     group = input_shape[1]
     edge_program = to_edge_program(
         Conv2dModule(
@@ -377,7 +359,7 @@ def test_conv2d_conversion__separated(
     input_data = np.random.random(input_shape).astype(np.float32)
 
     # Note: The generic group convolution is not yet supported by Neutron Converter. Once supported, the
-    #  commented out code allows usuall testing flow for this test-case.
+    #  commented out code allows usual testing flow for this test-case.
 
     # spy = mocker.spy(ModelBuilder, 'finish')
 
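The depthwise tests above previously also covered batch sizes 2 and 11; those parametrizations are removed because such inputs are no longer delegated. A hypothetical alternative, not what this commit does, would be to keep the multi-batch shapes visible as expected failures via pytest.mark.xfail, along these lines (test name and body are illustrative only):

import pytest

@pytest.mark.parametrize(
    "input_shape",
    [
        [1, 3, 12, 16],
        pytest.param(
            [2, 3, 10, 15],
            marks=pytest.mark.xfail(reason="Neutron only supports batch == 1."),
        ),
    ],
)
def test_depthwise_batch_constraint(input_shape):
    # Stand-in assertion for the real conversion/delegation check.
    assert input_shape[0] == 1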
backends/nxp/tests/ir/converter/node_converter/test_softmax_converter.py

Lines changed: 2 additions & 2 deletions
@@ -70,8 +70,8 @@ def test_softmax_conversion__unknown_input_format(input_shape, dim: int):
 @pytest.mark.parametrize(
     "input_shape,dim",
     [
-        pytest.param((10, 4, 32, 32), 1, id="4D,dim=1"),
-        pytest.param((10, 4, 16, 16), -3, id="4D,dim=-3"),
+        pytest.param((1, 4, 32, 32), 1, id="4D,dim=1"),
+        pytest.param((1, 4, 16, 16), -3, id="4D,dim=-3"),
     ],
 )
 def test_softmax_conversion_channel_last(input_shape, dim: int):

backends/nxp/tests/ir/converter/node_converter/test_view_copy_converter.py

Lines changed: 13 additions & 7 deletions
@@ -90,8 +90,8 @@ def forward(self, x):
 
 
 def test__channels_first_to_2d(mocker):
-    input_shape = [2, 4, 7, 9]
-    new_shape = [12, 32]  # Mix up the dimensions for a thorough test.
+    input_shape = [1, 4, 7, 9]
+    new_shape = [6, 32]  # Mix up the dimensions for a thorough test.
 
     torch_model = ConvReshapeModule(channels=input_shape[1], new_shape=new_shape)
     edge_program = to_edge_program(torch_model, input_shape).exported_program()
@@ -113,7 +113,7 @@ def test__channels_first_to_2d(mocker):
 
 
 def test__channels_first_to_4d(mocker):
-    input_shape = [2, 4, 6, 8]
+    input_shape = [1, 8, 6, 8]
     new_shape = [7, 4, 2, 5]
 
     torch_model = ConvReshapeModule(channels=input_shape[1], new_shape=new_shape)
@@ -124,7 +124,10 @@ def test__channels_first_to_4d(mocker):
     converter_spy = mocker.spy(ModelBuilder, "finish")
 
     convert_run_compare(
-        edge_program, input_data, tflite_input_preprocess=ToNHWCPreprocess()
+        edge_program,
+        input_data,
+        tflite_input_preprocess=ToNHWCPreprocess(),
+        atol=2.0e-7,
     )
 
     tflite_model = converter_spy.spy_return
@@ -137,7 +140,7 @@ def test__channels_first_to_4d(mocker):
 
 def test__formatless_to_channels_first(mocker):
     input_shape = [12, 32]
-    new_shape = [2, 4, 6, 8]  # Mix up the dimensions for a thorough test.
+    new_shape = [1, 4, 12, 8]  # Mix up the dimensions for a thorough test.
 
     torch_model = FormatlessToChannelsFirstModule(
         channels=new_shape[1], new_shape=new_shape
@@ -149,7 +152,10 @@ def test__formatless_to_channels_first(mocker):
     converter_spy = mocker.spy(ModelBuilder, "finish")
 
     convert_run_compare(
-        edge_program, input_data, tflite_output_preprocess=ToNCHWPreprocess()
+        edge_program,
+        input_data,
+        tflite_output_preprocess=ToNCHWPreprocess(),
+        atol=2.0e-7,
    )
 
     tflite_model = converter_spy.spy_return
@@ -162,7 +168,7 @@ def test__formatless_to_channels_first(mocker):
 
 def test__formatless_to_formatless(mocker):
     input_shape = [12, 32]
-    new_shape = [2, 4, 6, 8]
+    new_shape = [1, 4, 6, 16]
 
     torch_model = FormatlessToFormatlessModule(new_shape=new_shape)
     edge_program = to_edge_program(torch_model, input_shape).exported_program()
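
The two convert_run_compare calls in this file now pass an explicit absolute tolerance, presumably to absorb last-bit float32 differences introduced by the extra NHWC/NCHW layout conversions. Assuming the comparison behaves like numpy.allclose, an atol of 2.0e-7 admits differences on that order (values below are made up for illustration):

import numpy as np

reference = np.float32([0.10000001, 0.25])
converted = np.float32([0.10000013, 0.25])  # first element differs by ~1.2e-7

assert not np.array_equal(reference, converted)                   # exact match fails
assert np.allclose(reference, converted, rtol=0.0, atol=2.0e-7)   # tolerant match passes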
