Skip to content

Commit ac8ea41

Browse files
authored
Merge branch 'pytorch:main' into pr_print_tosa_deserialized
2 parents bde6b53 + 12b5324 commit ac8ea41

File tree

5 files changed

+63
-5
lines changed

5 files changed

+63
-5
lines changed

CMakeLists.txt

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,24 @@ if(NOT EXECUTORCH_ENABLE_LOGGING)
7777
add_definitions(-DET_LOG_ENABLED=0)
7878
endif()
7979

80+
# Configure log level. Must be one of Debug, Info, Error, Fatal
# (matched case-insensitively). Maps to the ET_MIN_LOG_LEVEL compile
# definition consumed by the runtime logging macros.
set(EXECUTORCH_LOG_LEVEL "Info" CACHE STRING
    "Build with the given ET_MIN_LOG_LEVEL value")
# Advertise the valid choices to cmake-gui / ccmake.
set_property(CACHE EXECUTORCH_LOG_LEVEL PROPERTY STRINGS Debug Info Error Fatal)
# Lowercase once so "info", "Info", and "INFO" all select the same branch.
string(TOLOWER "${EXECUTORCH_LOG_LEVEL}" LOG_LEVEL_LOWER)
# add_definitions() is used here for consistency with the ET_LOG_ENABLED
# handling immediately above in this file.
if(LOG_LEVEL_LOWER STREQUAL "debug")
  add_definitions(-DET_MIN_LOG_LEVEL=Debug)
elseif(LOG_LEVEL_LOWER STREQUAL "info")
  add_definitions(-DET_MIN_LOG_LEVEL=Info)
elseif(LOG_LEVEL_LOWER STREQUAL "error")
  add_definitions(-DET_MIN_LOG_LEVEL=Error)
elseif(LOG_LEVEL_LOWER STREQUAL "fatal")
  add_definitions(-DET_MIN_LOG_LEVEL=Fatal)
else()
  # Adjacent quoted arguments to message() concatenate; no `+` operator
  # exists in CMake (the original diff emitted a stray literal "+" here).
  message(SEND_ERROR
          "Unknown log level \"${EXECUTORCH_LOG_LEVEL}\". Expected one of "
          "Debug, Info, Error, or Fatal.")
endif()
97+
8098
option(EXECUTORCH_ENABLE_PROGRAM_VERIFICATION
8199
"Build with ET_ENABLE_PROGRAM_VERIFICATION"
82100
${_default_release_disabled_options})

backends/vulkan/runtime/graph/ops/glsl/binary_op.glsl

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -52,21 +52,21 @@ void main() {
5252
}
5353

5454
ivec4 in_coord = out_coord_to_in_coord(coord, in_sizes.data);
55-
vec4 in_texel = texelFetch(
55+
${VEC4_T[DTYPE]} in_texel = ${VEC4_T[DTYPE]}(texelFetch(
5656
image_in,
5757
COORD_TO_POS_${PACKING}(in_coord, in_sizes.data),
58-
0);
58+
0));
5959

6060
ivec4 other_coord = out_coord_to_in_coord(coord, other_sizes.data);
61-
vec4 other_texel = texelFetch(
61+
${VEC4_T[DTYPE]} other_texel = ${VEC4_T[DTYPE]}(texelFetch(
6262
image_other,
6363
COORD_TO_POS_${PACKING}(other_coord, other_sizes.data),
64-
0);
64+
0));
6565

6666
// Detect broadcasting
6767
if (PACKED_DIM_${PACKING}(other_sizes.data) < PACKED_DIM_${PACKING}(in_sizes.data)) {
6868
other_texel = other_texel.xxxx;
6969
}
7070

71-
imageStore(image_out, pos, OP(in_texel, other_texel, alpha.data));
71+
imageStore(image_out, pos, ${VEC4_T[DTYPE]}(OP(in_texel, other_texel, alpha.data)));
7272
}

backends/vulkan/runtime/graph/ops/glsl/binary_op.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ binary_op:
2323
SUFFIX: half
2424
- VALUE: float
2525
SUFFIX: float
26+
- VALUE: int
27+
SUFFIX: int
2628
shader_variants:
2729
- NAME: binary_add
2830
- NAME: binary_sub

build/Utils.cmake

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,8 @@ function(executorch_print_configuration_summary)
3434
message(STATUS " FLATC_EXECUTABLE : ${FLATC_EXECUTABLE}")
3535
message(
3636
STATUS " EXECUTORCH_ENABLE_LOGGING : ${EXECUTORCH_ENABLE_LOGGING}")
37+
message(
38+
STATUS " EXECUTORCH_LOG_LEVEL : ${EXECUTORCH_LOG_LEVEL}")
3739
message(STATUS " EXECUTORCH_ENABLE_PROGRAM_VERIFICATION : "
3840
"${EXECUTORCH_ENABLE_PROGRAM_VERIFICATION}")
3941
message(
@@ -46,6 +48,8 @@ function(executorch_print_configuration_summary)
4648
)
4749
message(
4850
STATUS " REGISTER_EXAMPLE_CUSTOM_OPS : ${REGISTER_EXAMPLE_CUSTOM_OPS}")
51+
message(STATUS " EXECUTORCH_BUILD_EXTENSION_AOT_UTIL : "
52+
"${EXECUTORCH_BUILD_EXTENSION_AOT_UTIL}")
4953
message(STATUS " EXECUTORCH_BUILD_EXTENSION_DATA_LOADER : "
5054
"${EXECUTORCH_BUILD_EXTENSION_DATA_LOADER}")
5155
message(STATUS " EXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL : "

examples/models/llama2/export_llama_lib.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -396,6 +396,7 @@ def build_args_parser() -> argparse.ArgumentParser:
396396
parser.add_argument("-X", "--xnnpack", action="store_true")
397397
parser.add_argument("-V", "--vulkan", action="store_true")
398398
parser.add_argument("--mps", action="store_true")
399+
parser.add_argument("--coreml", action="store_true")
399400

400401
parser.add_argument(
401402
"--expand_rope_table",
@@ -576,6 +577,39 @@ def _export_llama(modelname, args) -> str: # noqa: C901
576577
partitioners[MPSPartitioner.__name__] = MPSPartitioner(compile_specs)
577578
modelname = f"mps_{modelname}"
578579

580+
if args.coreml:
581+
assert (
582+
args.use_kv_cache is True
583+
), "CoreML backend currently only supports static shape and use_kv_cache=True is the only way to support it at the moment"
584+
try:
585+
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.apple.coreml.partition.coreml_partitioner`.
586+
import coremltools as ct
587+
588+
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.apple.coreml.compiler`
589+
from executorch.backends.apple.coreml.compiler import CoreMLBackend
590+
591+
# pyre-ignore: Undefined import [21]: Could not find a module corresponding to import `executorch.backends.apple.coreml.partition.coreml_partitioner`
592+
from executorch.backends.apple.coreml.partition.coreml_partitioner import (
593+
CoreMLPartitioner,
594+
)
595+
except ImportError:
596+
raise ImportError(
597+
"Please install the CoreML backend following https://pytorch.org/executorch/main/build-run-coreml.html"
598+
)
599+
600+
# pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `apple`.
601+
compile_specs = CoreMLBackend.generate_compile_specs(
602+
compute_precision=ct.precision(ct.precision.FLOAT16.value),
603+
compute_unit=ct.ComputeUnit[ct.ComputeUnit.ALL.name.upper()],
604+
# pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `apple`
605+
model_type=CoreMLBackend.MODEL_TYPE.MODEL,
606+
)
607+
# pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `apple`
608+
partitioners[CoreMLPartitioner.__name__] = CoreMLPartitioner(
609+
skip_ops_for_coreml_delegation=None, compile_specs=compile_specs
610+
)
611+
modelname = f"coreml_{modelname}"
612+
579613
if args.generate_etrecord:
580614
if not builder_exported_to_edge.edge_manager:
581615
raise ValueError("Unable to generate etrecord due to missing edge manager.")

0 commit comments

Comments
 (0)