Skip to content

Commit 5e830c8

Browse files
authored
Merge branch 'main' into export-D69600543
2 parents 2090a46 + fc5a492 commit 5e830c8

File tree

15 files changed

+71
-24
lines changed

15 files changed

+71
-24
lines changed

.ci/scripts/setup-linux.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ fi
2222
# have already been installed, so we use PyTorch build from source here instead
2323
# of nightly. This allows CI to test against latest commits from PyTorch
2424
install_executorch "use-pt-pinned-commit"
25-
build_executorch_runner "${BUILD_TOOL}"
25+
build_executorch_runner "${BUILD_TOOL}" "${2:-Release}"
2626

2727
if [[ "${GITHUB_BASE_REF:-}" == *main* || "${GITHUB_BASE_REF:-}" == *gh* ]]; then
2828
do_not_use_nightly_on_ci

.ci/scripts/setup-macos.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ install_pytorch_and_domains
136136
# We build PyTorch from source here instead of using nightly. This allows CI to test against
137137
# the pinned commit from PyTorch
138138
install_executorch "use-pt-pinned-commit"
139-
build_executorch_runner "${BUILD_TOOL}"
139+
build_executorch_runner "${BUILD_TOOL}" "${2:-Release}"
140140

141141
if [[ "${GITHUB_BASE_REF:-}" == *main* || "${GITHUB_BASE_REF:-}" == *gh* ]]; then
142142
do_not_use_nightly_on_ci

.ci/scripts/unittest-linux.sh

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,14 @@ else
1414
exit 1
1515
fi
1616

17+
BUILD_MODE=$2
18+
if [[ "${BUILD_MODE:-}" =~ ^(Debug|Release)$ ]]; then
19+
echo "Running tests in build mode ${BUILD_MODE} ..."
20+
else
21+
echo "Unsupported build mode ${BUILD_MODE}, options are Debug or Release."
22+
exit 1
23+
fi
24+
1725
# The generic Linux job chooses to use base env, not the one setup by the image
1826
eval "$(conda shell.bash hook)"
1927
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
@@ -25,7 +33,7 @@ source .ci/scripts/setup-vulkan-linux-deps.sh
2533
PYTHON_EXECUTABLE=python \
2634
EXECUTORCH_BUILD_PYBIND=ON \
2735
CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
28-
.ci/scripts/setup-linux.sh "$BUILD_TOOL"
36+
.ci/scripts/setup-linux.sh "$BUILD_TOOL" "$BUILD_MODE"
2937

3038
# Install llama3_2_vision dependencies.
3139
PYTHON_EXECUTABLE=python ./examples/models/llama3_2_vision/install_requirements.sh

.ci/scripts/unittest-macos.sh

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,14 @@ else
1414
exit 1
1515
fi
1616

17+
BUILD_MODE=$2
18+
if [[ $BUILD_MODE =~ ^(Debug|Release)$ ]]; then
19+
echo "Running tests in build mode ${BUILD_MODE} ..."
20+
else
21+
echo "Unsupported build mode ${BUILD_MODE}, options are Debug or Release."
22+
exit 1
23+
fi
24+
1725
bash .ci/scripts/setup-conda.sh
1826
eval "$(conda shell.bash hook)"
1927

@@ -27,7 +35,7 @@ PYTHON_EXECUTABLE=python \
2735
EXECUTORCH_BUILD_PYBIND=ON \
2836
CMAKE_ARGS="-DEXECUTORCH_BUILD_COREML=ON -DEXECUTORCH_BUILD_MPS=ON -DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" \
2937
${CONDA_RUN} --no-capture-output \
30-
.ci/scripts/setup-macos.sh cmake
38+
.ci/scripts/setup-macos.sh "${BUILD_TOOL}" "${BUILD_MODE}"
3139

3240
# Install llama3_2_vision dependencies.
3341
PYTHON_EXECUTABLE=python ./examples/models/llama3_2_vision/install_requirements.sh

.ci/scripts/utils.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ build_executorch_runner_cmake() {
109109
pushd "${CMAKE_OUTPUT_DIR}" || return
110110
# This command uses buck2 to gather source files and buck2 could crash flakily
111111
# on MacOS
112-
retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE=Release ..
112+
retry cmake -DPYTHON_EXECUTABLE="${PYTHON_EXECUTABLE}" -DCMAKE_BUILD_TYPE="${1:-Release}" ..
113113
popd || return
114114

115115
if [ "$(uname)" == "Darwin" ]; then
@@ -124,7 +124,7 @@ build_executorch_runner() {
124124
if [[ $1 == "buck2" ]]; then
125125
build_executorch_runner_buck2
126126
elif [[ $1 == "cmake" ]]; then
127-
build_executorch_runner_cmake
127+
build_executorch_runner_cmake "$2"
128128
else
129129
echo "Invalid build tool $1. Only buck2 and cmake are supported atm"
130130
exit 1

.github/scripts/extract_benchmark_results.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -229,21 +229,22 @@ def extract_ios_metric(
229229

230230
elif method == "forward":
231231
if metric_name == "Clock Monotonic Time, s":
232-
benchmark_result["metric"] = (
233-
"generate_time(ms)"
234-
if "llama" in test_name
235-
else "avg_inference_latency(ms)"
236-
)
232+
benchmark_result["metric"] = "avg_inference_latency(ms)"
237233
benchmark_result["actualValue"] = metric_value * 1000
238234

239235
elif metric_name == "Memory Peak Physical, kB":
240236
# NB: Showing the value in mB is friendlier IMO
241237
benchmark_result["metric"] = "peak_inference_mem_usage(mb)"
242238
benchmark_result["actualValue"] = metric_value / 1024
243239

244-
elif method == "generate" and metric_name == "Tokens Per Second, t/s":
245-
benchmark_result["metric"] = "token_per_sec"
246-
benchmark_result["actualValue"] = metric_value
240+
elif method == "generate":
241+
if metric_name == "Clock Monotonic Time, s":
242+
benchmark_result["metric"] = "generate_time(ms)"
243+
benchmark_result["actualValue"] = metric_value * 1000
244+
245+
elif metric_name == "Tokens Per Second, t/s":
246+
benchmark_result["metric"] = "token_per_sec"
247+
benchmark_result["actualValue"] = metric_value
247248

248249
return benchmark_result
249250

.github/workflows/_unittest.yml

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,10 @@ on:
77
required: true
88
type: string
99
description: Name of the docker image to use.
10+
build-mode:
11+
required: true
12+
type: string
13+
description: Build mode to use, Debug or Release.
1014
build-tool:
1115
required: true
1216
type: string
@@ -30,7 +34,7 @@ jobs:
3034
timeout: 90
3135
script: |
3236
set -eux
33-
.ci/scripts/unittest-linux.sh "${{ inputs.build-tool }}"
37+
.ci/scripts/unittest-linux.sh "${{ inputs.build-tool }}" "${{ inputs.build-mode }}"
3438
3539
macos:
3640
uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
@@ -41,4 +45,4 @@ jobs:
4145
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
4246
script: |
4347
set -eux
44-
.ci/scripts/unittest-macos.sh "${{ inputs.build-tool }}"
48+
.ci/scripts/unittest-macos.sh "${{ inputs.build-tool }}" "${{ inputs.build-mode }}"

.github/workflows/pull.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -367,6 +367,7 @@ jobs:
367367
id-token: write
368368
contents: read
369369
with:
370+
build-mode: Debug
370371
build-tool: cmake
371372
docker-image: executorch-ubuntu-22.04-clang12
372373

@@ -376,6 +377,7 @@ jobs:
376377
id-token: write
377378
contents: read
378379
with:
380+
build-mode: Debug
379381
build-tool: buck2
380382
docker-image: executorch-ubuntu-22.04-clang12
381383

.github/workflows/trunk.yml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -489,3 +489,13 @@ jobs:
489489
PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
490490
# Test llama2
491491
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -mode "${MODE}" -dtype "${DTYPE}" -pt2e_quantize "${PT2E_QUANTIZE}"
492+
493+
unittest-release:
494+
uses: ./.github/workflows/_unittest.yml
495+
permissions:
496+
id-token: write
497+
contents: read
498+
with:
499+
build-mode: Release
500+
build-tool: cmake
501+
docker-image: executorch-ubuntu-22.04-clang12

examples/models/llama/TARGETS

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,8 @@ runtime.command_alias(
9595
)
9696

9797
runtime.python_library(
98-
name = "export_library",
98+
name = "source_transformation",
9999
srcs = [
100-
"export_llama.py",
101-
"export_llama_lib.py",
102-
"model.py",
103100
"source_transformation/apply_spin_quant_r1_r2.py",
104101
"source_transformation/attention.py",
105102
"source_transformation/lora.py",
@@ -114,6 +111,15 @@ runtime.python_library(
114111
"source_transformation/vulkan_rope.py",
115112
"source_transformation/attention_sink.py",
116113
],
114+
)
115+
116+
runtime.python_library(
117+
name = "export_library",
118+
srcs = [
119+
"export_llama.py",
120+
"export_llama_lib.py",
121+
"model.py",
122+
],
117123
_is_external_target = True,
118124
base_module = "executorch.examples.models.llama",
119125
visibility = [
@@ -123,6 +129,7 @@ runtime.python_library(
123129
"@EXECUTORCH_CLIENTS",
124130
],
125131
deps = [
132+
":source_transformation",
126133
"//ai_codesign/gen_ai/fast_hadamard_transform:fast_hadamard_transform",
127134
"//caffe2:torch",
128135
"//executorch/backends/vulkan/_passes:vulkan_passes",

extension/benchmark/apple/Benchmark/Tests/LLaMA/LLaMATests.mm

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ @implementation LLaMATests
8282
return;
8383
}
8484
TokensPerSecondMetric *tokensPerSecondMetric = [TokensPerSecondMetric new];
85-
[testCase measureWithMetrics:@[ tokensPerSecondMetric, [XCTMemoryMetric new] ]
85+
[testCase measureWithMetrics:@[ tokensPerSecondMetric, [XCTClockMetric new], [XCTMemoryMetric new] ]
8686
block:^{
8787
tokensPerSecondMetric.tokenCount = 0;
8888
const auto status = runner->generate(

runtime/core/result.h

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,13 @@ class Result final {
5959
* a non-Ok value.
6060
*/
6161
/* implicit */ Result(Error error)
62-
: error_(error == Error::Ok ? Error::Internal : error),
63-
hasValue_(false) {}
62+
: error_(error == Error::Ok ? Error::Internal : error), hasValue_(false) {
63+
if ET_UNLIKELY (error == Error::Ok) {
64+
ET_LOG(
65+
Debug,
66+
"Attempted to create Result from Error::Ok, this has been converted to Error::Internal.");
67+
}
68+
}
6469

6570
/// Value copy constructor.
6671
/* implicit */ Result(const T& val) : value_(val), hasValue_(true) {}

runtime/core/test/error_handling_test.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ TEST(ErrorHandlingTest, ResultBasic) {
110110
}
111111

112112
TEST(ErrorHandlingTest, OkErrorNotPossible) {
113+
executorch::runtime::runtime_init();
113114
Result<uint32_t> r(Error::Ok);
114115
ASSERT_FALSE(r.ok());
115116
ASSERT_NE(r.error(), Error::Ok);

shim/xplat/executorch/build/runtime_wrapper.bzl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ def _patch_kwargs_common(kwargs):
171171
# don't pick up unexpected clients while things are still in flux.
172172
if not kwargs.pop("_is_external_target", False):
173173
for target in kwargs.get("visibility", []):
174-
if not (target.startswith("//executorch") or target.startswith("@")):
174+
if not (target.startswith("//executorch") or target.startswith("//pytorch/tokenizers") or target.startswith("@")):
175175
fail("Please manage all external visibility using the " +
176176
"EXECUTORCH_CLIENTS list in " +
177177
"//executorch/build/fb/clients.bzl. " +

test/utils/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ def define_common_targets():
2121
],
2222
visibility = [
2323
"//executorch/...",
24+
"//pytorch/tokenizers/...",
2425
"@EXECUTORCH_CLIENTS",
2526
],
2627
deps = [

0 commit comments

Comments (0)