
Fix cadence BUCK deps and pyre #7116


Merged: 1 commit, Nov 28, 2024
1 change: 1 addition & 0 deletions backends/cadence/runtime/TARGETS
@@ -18,6 +18,7 @@ python_library(
         "//executorch/devtools/bundled_program:config",
         "//executorch/devtools/bundled_program:core",
         "//executorch/devtools/bundled_program/serialize:lib",
+        "//executorch/devtools:lib",
         "//executorch/exir:lib",
     ],
 )
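
For context on the dependency fix: the added line declares a direct dependency on //executorch/devtools:lib, presumably because the runtime code imports from executorch.devtools directly. A sketch of what the full target plausibly looks like, where the target name and srcs are assumptions and only the deps are taken from the diff:

    python_library(
        name = "runtime",  # assumed target name
        srcs = ["runtime.py", "utils.py"],  # assumed sources
        deps = [
            "//executorch/devtools/bundled_program:config",
            "//executorch/devtools/bundled_program:core",
            "//executorch/devtools/bundled_program/serialize:lib",
            "//executorch/devtools:lib",  # the dependency this PR adds
            "//executorch/exir:lib",
        ],
    )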
3 changes: 0 additions & 3 deletions backends/cadence/runtime/runtime.py
@@ -167,9 +167,7 @@ def run(


 def compare(
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
     outputs: Any,
-    # pyre-fixme[2]: Parameter annotation cannot be `Any`.
     ref_outputs: Any,
     name: str = "",
     eps_error: float = 1e-1,
@@ -223,7 +221,6 @@ def run_and_compare(
     compare(outputs, ref_outputs, eps_error=eps_error, eps_warn=eps_warn)


-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
 def to_nd_array(v: Union[bool, numbers.Number, ndarray, torch.Tensor]) -> np.ndarray:
     if isinstance(v, np.ndarray):
         return v
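
The to_nd_array helper is cut off above; a minimal sketch of a plausible completion, where the torch and scalar branches are assumptions inferred from the signature (only the np.ndarray branch appears in the diff):

    import numbers
    from typing import Union

    import numpy as np
    import torch


    def to_nd_array(
        v: Union[bool, numbers.Number, np.ndarray, torch.Tensor]
    ) -> np.ndarray:
        # From the diff: arrays pass through unchanged.
        if isinstance(v, np.ndarray):
            return v
        # Assumed: tensors are detached and moved to CPU before conversion.
        if isinstance(v, torch.Tensor):
            return v.detach().cpu().numpy()
        # Assumed: bools and plain numbers become 0-d arrays.
        return np.array(v)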
16 changes: 3 additions & 13 deletions backends/cadence/runtime/utils.py
@@ -13,12 +13,11 @@
 import torch


-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
-def distance(fn: Callable[[np.ndarray, np.ndarray], float]) -> Callable[
+def distance(
+    fn: Callable[[np.ndarray, np.ndarray], float],
+) -> Callable[
     [
-        # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
         typing.Union[np.ndarray, torch._tensor.Tensor],
-        # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
         typing.Union[np.ndarray, torch._tensor.Tensor],
     ],
     float,
@@ -27,9 +26,7 @@ def distance(fn: Callable[[np.ndarray, np.ndarray], float]) -> Callable[
     # the distance between two N-D tensors given a function. This can be a RMS
     # function, maximum abs diff, or any kind of distance function.
     def wrapper(
-        # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
         a: Union[np.ndarray, torch.Tensor],
-        # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
         b: Union[np.ndarray, torch.Tensor],
     ) -> float:
         # convert a and b to np.ndarray type fp64
@@ -68,24 +65,20 @@ def wrapper(


 @distance
-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
 def rms(a: np.ndarray, b: np.ndarray) -> float:
     return ((a - b) ** 2).mean() ** 0.5


 @distance
-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
 def max_abs_diff(a: np.ndarray, b: np.ndarray) -> float:
     return np.abs(a - b).max()


 @distance
-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
 def max_rel_diff(x: np.ndarray, x_ref: np.ndarray) -> float:
     return np.abs((x - x_ref) / x_ref).max()


-# pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
 def to_np_arr_fp64(x: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
     if isinstance(x, torch.Tensor):
         x = x.detach().cpu().numpy()
@@ -94,11 +87,8 @@ def to_np_arr_fp64(x: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
     return x


-# pyre-fixme[3]: Return type must be annotated.
 def normalized_rms(
-    # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
     predicted: Union[np.ndarray, torch.Tensor],
-    # pyre-fixme[24]: Generic type `np.ndarray` expects 2 type parameters.
     ground_truth: Union[np.ndarray, torch.Tensor],
 ):
     num = rms(predicted, ground_truth)
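
As a usage note, distance is a decorator: each metric is written once against float64 numpy arrays, and the wrapper lets callers pass numpy arrays or torch tensors interchangeably. A self-contained sketch of the pattern (the wrapper here skips any extra checks the real one may perform):

    from typing import Callable, Union

    import numpy as np
    import torch


    def to_np_arr_fp64(x: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
        # Mirror of the helper above: tensors to CPU numpy, then float64.
        if isinstance(x, torch.Tensor):
            x = x.detach().cpu().numpy()
        return x.astype(np.float64)


    def distance(
        fn: Callable[[np.ndarray, np.ndarray], float],
    ) -> Callable[
        [Union[np.ndarray, torch.Tensor], Union[np.ndarray, torch.Tensor]],
        float,
    ]:
        def wrapper(
            a: Union[np.ndarray, torch.Tensor],
            b: Union[np.ndarray, torch.Tensor],
        ) -> float:
            # Normalize both inputs, then apply the wrapped metric.
            return fn(to_np_arr_fp64(a), to_np_arr_fp64(b))

        return wrapper


    @distance
    def rms(a: np.ndarray, b: np.ndarray) -> float:
        return ((a - b) ** 2).mean() ** 0.5


    # Both call sites run, regardless of input type:
    print(rms(torch.ones(3), np.zeros(3)))  # -> 1.0
    print(rms(np.ones(3), np.zeros(3)))     # -> 1.0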
2 changes: 2 additions & 0 deletions examples/cadence/operators/test_add_op.py
@@ -13,6 +13,7 @@


 class ATenOpTestCases(unittest.TestCase):
+    # pyre-fixme[16]: Module `parameterized.parameterized` has no attribute `expand`.
     @parameterized.expand(
         [
             [(7, 5, 6), (7, 5, 6)],
@@ -61,6 +62,7 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
             model, (X, Y), file_name=self._testMethodName, run_and_compare=False
         )

+    # pyre-fixme[16]: Module `parameterized.parameterized` has no attribute `expand`.
     @parameterized.expand(
         [
             [(7, 5, 6), (7, 5, 6)],
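
The suppressions added here exist because pyre cannot statically see the expand attribute that the parameterized package attaches; at runtime the decorator works fine and generates one test method per parameter row. A minimal illustration (the class and test names are invented for the example):

    import unittest

    from parameterized import parameterized


    class ExampleShapes(unittest.TestCase):
        # pyre-fixme[16]: Module `parameterized.parameterized` has no attribute `expand`.
        @parameterized.expand(
            [
                [(7, 5, 6), (7, 5, 6)],
                [(1, 3), (1, 3)],
            ]
        )
        def test_shapes_match(self, shape_x, shape_y):
            # One test method is generated per row above.
            self.assertEqual(len(shape_x), len(shape_y))


    if __name__ == "__main__":
        unittest.main()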
2 changes: 0 additions & 2 deletions extension/llm/export/quantizer_lib.py
@@ -184,14 +184,12 @@ def get_qnn_quantizer(
         )
         qnn_quantizer.set_per_channel_conv_quant(enable=False)
         qnn_quantizer.set_per_channel_linear_quant(enable=False)
-        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
         qnn_quantizer.set_quant_config(
             quant_dtype, is_qat=is_qat, act_observer=MinMaxObserver
         )
     elif quant_config == "16a4w":
         # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
         quant_dtype = QuantDtype.use_16a4w
-        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
         qnn_quantizer.set_quant_config(
             quant_dtype, is_qat=is_qat, act_observer=MinMaxObserver
         )
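
For orientation, the code being touched configures the QNN quantizer for 16-bit activations with 4-bit weights. A minimal sketch of the pattern; the QuantDtype import path and the helper name are assumptions, while the set_quant_config call shape is taken verbatim from the diff:

    # MinMaxObserver is the standard torch.ao observer.
    from torch.ao.quantization.observer import MinMaxObserver


    def configure_16a4w(qnn_quantizer, is_qat: bool = False):
        # Assumed import path for the ExecuTorch Qualcomm backend's QuantDtype.
        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends`
        # has no attribute `qualcomm`.
        from executorch.backends.qualcomm.quantizer.quantizer import QuantDtype

        quant_dtype = QuantDtype.use_16a4w
        # Call shape copied from the diff above.
        qnn_quantizer.set_quant_config(
            quant_dtype, is_qat=is_qat, act_observer=MinMaxObserver
        )
        return quant_dtype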