Commit ebe5b77

Skip annotate boolean input
Pull Request resolved: #2957
ghstack-source-id: 222193714
@exported-using-ghexport

It only makes sense to quantize floating-point tensors, not boolean ones. Add a check to make sure only float tensors are annotated in the quantizer.

Differential Revision: [D55946526](https://our.internmc.facebook.com/intern/diff/D55946526/)
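As a quick illustration of the motivation (a minimal sketch, not part of this commit; the toy `MaskedAdd` module and the `torch.export` walk below are assumptions for demonstration): a binary op can legitimately receive a boolean operand, e.g. a comparison result, and only its float operand should be observed and quantized.

```python
# Minimal sketch (assumption, not from the commit): a graph where a binary
# add receives one float and one boolean input.
import torch


class MaskedAdd(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # `y > 0` produces a torch.bool tensor, so the add op sees one
        # float32 operand and one boolean operand.
        return x + (y > 0)


exported = torch.export.export(MaskedAdd(), (torch.randn(4), torch.randn(4)))

for node in exported.graph.nodes:
    val = node.meta.get("val")
    if isinstance(val, torch.Tensor):
        # Only float32 values are sensible targets for observers/quantization.
        print(f"{node.name}: {val.dtype}")
```

Here the comparison node's `val` metadata carries `torch.bool`, which is exactly the kind of input the quantizer should now skip.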
1 parent: 7d4bafc · commit: ebe5b77

backends/qualcomm/quantizer/utils.py

Lines changed: 11 additions & 2 deletions
```diff
@@ -9,6 +9,7 @@
 import torch
 
 from torch._ops import OpOverload
+from torch._subclasses import FakeTensor
 
 from torch.ao.quantization.quantizer import (
     QuantizationAnnotation,
@@ -41,6 +42,14 @@ def decorator(annotator: Callable):
 
     return decorator
 
+def _is_input_float_tensor(node: Node):
+    """Check whether the input is a float tensor, so that we can skip
+    quantization for non-float inputs, since observers only work with float tensors.
+    """
+    if not isinstance(node, Node) or "val" not in node.meta or not isinstance(node.meta["val"], FakeTensor):
+        return False
+    return node.meta["val"].dtype == torch.float32
+
 
 def _is_annotated(nodes: List[Node]):
     """
@@ -123,11 +132,11 @@ def annotate_binary(node: Node, quantization_config: QuantizationConfig) -> None
 
     input_qspec_map = {}
     input_act0 = node.args[0]
-    if isinstance(input_act0, Node):
+    if _is_input_float_tensor(input_act0):
         input_qspec_map[input_act0] = input_act_qspec
 
     input_act1 = node.args[1]
-    if isinstance(input_act1, Node):
+    if _is_input_float_tensor(input_act1):
         input_qspec_map[input_act1] = input_act_qspec
 
     node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation(
```
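For reference, a standalone approximation of the new helper's behavior (the helper is re-implemented below for illustration; the hand-built `torch.fx` placeholders and the `FakeTensorMode` usage are assumptions — in the quantizer, the `"val"` metadata is populated by export/tracing):

```python
# Standalone sketch (assumption-laden): mirrors the patch's check on
# hand-built FX nodes whose "val" metadata we populate ourselves.
import torch
from torch._subclasses import FakeTensor
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx import Graph, Node


def _is_input_float_tensor(node) -> bool:
    # Same logic as the patch: reject non-Node args, missing "val"
    # metadata, and non-FakeTensor values before testing the dtype.
    if (
        not isinstance(node, Node)
        or "val" not in node.meta
        or not isinstance(node.meta["val"], FakeTensor)
    ):
        return False
    return node.meta["val"].dtype == torch.float32


g = Graph()
float_node = g.placeholder("x")
bool_node = g.placeholder("mask")

with FakeTensorMode() as mode:
    float_node.meta["val"] = mode.from_tensor(torch.randn(4))
    bool_node.meta["val"] = mode.from_tensor(torch.zeros(4, dtype=torch.bool))

print(_is_input_float_tensor(float_node))  # True  -> receives an input qspec
print(_is_input_float_tensor(bool_node))   # False -> skipped by annotate_binary
print(_is_input_float_tensor(1.0))         # False -> scalar args are skipped too
```

Note that the check also quietly covers scalar constants passed in `node.args`, which `annotate_binary` previously filtered only via the `isinstance(..., Node)` test.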
