Commit ed486c2

Arm backend: Replace asserts with exceptions in quantizer module (#11519)
Replace all assert statements in the quantizer module with appropriate exceptions (ValueError/TypeError) so misuse raises clear errors instead of AssertionError.
1 parent 796dcd7 commit ed486c2
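
For context, a minimal sketch of why the swap matters (illustrative only, not code from this commit; the function names are hypothetical): an `assert` is stripped entirely when Python runs with optimizations enabled (`python -O`), so misuse can pass silently, whereas an explicit `ValueError` is always raised and can be caught by callers.

```python
# Illustrative sketch of the pattern this commit applies; names are hypothetical.

def set_config_with_assert(config):
    # Stripped under `python -O`; misuse may pass silently.
    assert config is not None, "config == None is not supported yet"
    return config


def set_config_with_exception(config):
    # Always enforced; raises a clear, catchable error type.
    if config is None:
        raise ValueError("config == None is not supported yet")
    return config
```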

File tree: 2 files changed (+29, -17 lines)

backends/arm/quantizer/arm_quantizer.py

Lines changed: 3 additions & 3 deletions
@@ -247,9 +247,9 @@ def set_module_name(
         quantizer.set_module_name("blocks.sub"), it will quantize all supported operator/operator
         patterns in the submodule with this module name with the given `quantization_config`
         """
-        assert (
-            quantization_config is not None
-        ), " quantization_config == None is not supported yet"
+        # Validate that quantization_config is provided
+        if quantization_config is None:
+            raise ValueError("quantization_config == None is not supported yet")
         self.module_name_config[module_name] = quantization_config
         return self
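
A hedged usage sketch of the caller-facing change (the `quantizer` object below is assumed to be an already-constructed instance of the Arm backend quantizer class patched above; its construction is not shown in this commit): passing `None` as the config now raises a catchable `ValueError` instead of an `AssertionError`.

```python
# `quantizer` is assumed to be an already-constructed Arm backend quantizer;
# its construction is omitted because it is not part of this commit.
try:
    quantizer.set_module_name("blocks.sub", None)
except ValueError as err:
    # Before this change the same misuse surfaced as an AssertionError,
    # or passed silently when asserts were stripped with `python -O`.
    print(f"Rejected quantization config: {err}")
```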

backends/arm/quantizer/quantization_config.py

Lines changed: 26 additions & 14 deletions
@@ -29,30 +29,40 @@ def get_input_act_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'input_activation' after asserting that input_activation.qscheme is valid."""
         if self.input_activation is None:
             return None
-        assert self.input_activation.qscheme in [
+        # Validate that input_activation uses a supported qscheme
+        if self.input_activation.qscheme not in [
             torch.per_tensor_affine,
             torch.per_tensor_symmetric,
-        ], f"Unsupported quantization_spec {self.input_activation} for input_activation."
+        ]:
+            raise ValueError(
+                f"Unsupported quantization_spec {self.input_activation} for input_activation."
+            )
         return self.input_activation
 
     def get_output_act_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'output_activation' after asserting that output_activation.qscheme is valid."""
         if self.output_activation is None:
             return None
-        assert self.output_activation.qscheme in [
+        # Validate that output_activation uses a supported qscheme
+        if self.output_activation.qscheme not in [
             torch.per_tensor_affine,
             torch.per_tensor_symmetric,
-        ], f"Unsupported quantization_spec {self.output_activation} for output_activation."
+        ]:
+            raise ValueError(
+                f"Unsupported quantization_spec {self.output_activation} for output_activation."
+            )
         return self.output_activation
 
     def get_weight_qspec(self) -> QuantizationSpec | None:
         """Returns QuantizationSpec 'weight' after asserting that weight.qscheme is valid."""
         if self.weight is None:
             return None
-        assert self.weight.qscheme in [
+        # Validate that weight uses a supported qscheme
+        if self.weight.qscheme not in [
             torch.per_tensor_symmetric,
             torch.per_channel_symmetric,
-        ], f"Unsupported quantization_spec {self.weight} for weight"
+        ]:
+            raise ValueError(f"Unsupported quantization_spec {self.weight} for weight")
         return self.weight
 
     def get_bias_qspec(self, node: torch.fx.Node) -> QuantizationSpec | None:
@@ -61,11 +71,11 @@ def get_bias_qspec(self, node: torch.fx.Node) -> QuantizationSpec | None:
         def _derive_qparams_fn(
             obs_or_fqs: list[ObserverOrFakeQuantize],
         ) -> tuple[torch.Tensor, torch.Tensor]:
-            assert (
-                len(obs_or_fqs) == 2
-            ), "Expecting two obs/fqs, one for activation and one for weight, got: {}".format(
-                len(obs_or_fqs)
-            )
+            # Validate expected number of observers/fake-quantizes
+            if len(obs_or_fqs) != 2:
+                raise ValueError(
+                    f"Expecting two obs/fqs, one for activation and one for weight, got: {len(obs_or_fqs)}"
+                )
             act_obs_or_fq = obs_or_fqs[0]
             weight_obs_or_fq = obs_or_fqs[1]
             act_scale, act_zp = act_obs_or_fq.calculate_qparams()
@@ -94,9 +104,11 @@ def _derive_qparams_fn(
 
         if self.bias is None:
             return None
-        assert (
-            self.bias.dtype == torch.float
-        ), "Only float dtype for bias is supported for bias right now"
+        # Validate that bias dtype is floating-point
+        if self.bias.dtype != torch.float:
+            raise ValueError(
+                "Only float dtype for bias is supported for bias right now"
+            )
         return self.bias
 
     def get_fixed_qspec(
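
The qscheme guards above repeat the same pattern for activations and weights. As a hypothetical refactor sketch (not part of this commit), the check could be factored into a helper; only the allowed qschemes and the error wording come from the diff, the helper itself is invented for illustration.

```python
import torch

# Hypothetical helper mirroring the repeated validation in quantization_config.py.
def _check_qscheme(spec, allowed_qschemes, field_name):
    if spec is not None and spec.qscheme not in allowed_qschemes:
        raise ValueError(f"Unsupported quantization_spec {spec} for {field_name}.")
    return spec


# Per the diff: activations accept per-tensor schemes only;
# weights accept symmetric per-tensor or per-channel schemes.
ACT_QSCHEMES = [torch.per_tensor_affine, torch.per_tensor_symmetric]
WEIGHT_QSCHEMES = [torch.per_tensor_symmetric, torch.per_channel_symmetric]
```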

0 commit comments
