
Commit d38ca81

Android refactor cmake build
Differential Revision: D62408596
Pull Request resolved: #5204
1 parent c76b22f commit d38ca81

File tree: 8 files changed, +57 -113 lines changed

build/build_android_llm_demo.sh

Lines changed: 2 additions & 15 deletions
@@ -54,27 +54,14 @@ build_android_native_library() {
   fi
   cmake --build "${CMAKE_OUT}" -j "${CMAKE_JOBS}" --target install --config Release
 
-  cmake examples/models/llama2 \
-    -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
-    -DANDROID_ABI="$ANDROID_ABI" \
-    -DANDROID_PLATFORM=android-23 \
-    -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
-    -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
-    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-    -DEXECUTORCH_BUILD_XNNPACK=ON \
-    -DCMAKE_BUILD_TYPE=Release \
-    -B"${CMAKE_OUT}"/examples/models/llama2
-
-  cmake --build "${CMAKE_OUT}"/examples/models/llama2 -j "${CMAKE_JOBS}" --config Release
-
-
   cmake extension/android \
     -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
     -DANDROID_ABI="${ANDROID_ABI}" \
     -DANDROID_PLATFORM=android-23 \
     -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
     -DEXECUTORCH_ENABLE_LOGGING=ON \
     -DEXECUTORCH_LOG_LEVEL=Info \
+    -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
     -DEXECUTORCH_BUILD_LLAMA_JNI=ON \
     -DCMAKE_BUILD_TYPE=Release \
     -B"${CMAKE_OUT}"/extension/android
@@ -110,7 +97,7 @@ build_aar() {
   find jni -type f -name "libexecutorch_jni.so" -exec bash -c 'mv "$1" "${1/_jni/}"' bash {} \;
   # Zip all necessary files into the AAR file
   zip -r executorch.aar libs jni/*/libexecutorch.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
-  zip -r executorch-llama.aar libs jni/*/libexecutorch_llama_jni.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
+  zip -r executorch-llama.aar libs jni/*/libexecutorch.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
   popd
 }
 
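For orientation, a minimal sketch (not part of the diff) of the native build flow this change leaves behind: with the separate examples/models/llama2 pass removed, a single configure-and-build of extension/android produces the one JNI library. It assumes the same ANDROID_NDK, ANDROID_ABI, CMAKE_OUT and CMAKE_JOBS variables the script already defines.

    # Configure the Android JNI extension; the llama/llava runners and custom ops
    # are now pulled into this project when the flags below are ON.
    cmake extension/android \
      -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
      -DANDROID_ABI="${ANDROID_ABI}" \
      -DANDROID_PLATFORM=android-23 \
      -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
      -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
      -DEXECUTORCH_BUILD_LLAMA_JNI=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -B"${CMAKE_OUT}"/extension/android

    # Build the single JNI library; build_aar() later renames libexecutorch_jni.so
    # to libexecutorch.so before zipping the AAR.
    cmake --build "${CMAKE_OUT}"/extension/android -j "${CMAKE_JOBS}" --config Release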

examples/demo-apps/android/LlamaDemo/setup-with-qnn.sh

Lines changed: 3 additions & 1 deletion
@@ -37,6 +37,7 @@ cmake examples/models/llama2 \
   -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
   -DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
   -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
+  -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
   -DCMAKE_BUILD_TYPE=Release \
   -B"${CMAKE_OUT}"/examples/models/llama2
 
@@ -47,6 +48,7 @@ cmake extension/android \
   -DANDROID_ABI="${ANDROID_ABI}" \
   -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
   -DEXECUTORCH_BUILD_LLAMA_JNI=ON \
+  -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
   -DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
   -DCMAKE_BUILD_TYPE=Release \
   -B"${CMAKE_OUT}"/extension/android
@@ -59,7 +61,7 @@ mkdir -p "${JNI_LIBS_PATH}/${ANDROID_ABI}"
 BUILD_AAR_DIR="$(mktemp -d)"
 mkdir -p "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}" "${BUILD_AAR_DIR}/libs"
 JNI_LIBS_PATH="${BUILD_AAR_DIR}/jni"
-cp "${CMAKE_OUT}"/extension/android/libexecutorch_llama_jni.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
+cp "${CMAKE_OUT}"/extension/android/libexecutorch_jni.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/libexecutorch_jni.so"
 cp "${CMAKE_OUT}"/lib/libqnn_executorch_backend.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
 cp "${QNN_SDK_ROOT}"/lib/aarch64-android/libQnnHtp.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
 cp "${QNN_SDK_ROOT}"/lib/aarch64-android/libQnnSystem.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"

examples/demo-apps/android/LlamaDemo/setup.sh

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ cmake --build "${CMAKE_OUT}"/extension/android -j "${CMAKE_JOBS}" --config Relea
 
 BUILD_AAR_DIR="$(mktemp -d)"
 mkdir -p "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}" "${BUILD_AAR_DIR}/libs"
-cp "${CMAKE_OUT}"/extension/android/libexecutorch_llama_jni.so "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}"
+cp "${CMAKE_OUT}"/extension/android/libexecutorch_jni.so "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}/libexecutorch.so"
 cp extension/android/build/libs/executorch.jar "${BUILD_AAR_DIR}/libs"
 echo \<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" \
   package=\"org.pytorch.executorch\"\> \

extension/android/CMakeLists.txt

Lines changed: 37 additions & 62 deletions
@@ -10,7 +10,6 @@ project(executorch_jni)
 
 if(NOT CMAKE_CXX_STANDARD)
   set(CMAKE_CXX_STANDARD 17)
-  # Can't set to 11 due to executor_runner.cpp make_unique
 endif()
 
 if(NOT ANDROID)
@@ -71,78 +70,54 @@ if(TARGET vulkan_backend)
   list(APPEND link_libraries vulkan_backend)
 endif()
 
+if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
+  add_subdirectory(
+    ${EXECUTORCH_ROOT}/extension/llm/custom_ops
+    ${CMAKE_CURRENT_BINARY_DIR}/../../extension/llm/custom_ops
+  )
+  list(APPEND link_libraries custom_ops)
+  target_link_options_shared_lib(custom_ops)
+endif()
+
 add_library(executorch_jni SHARED jni/jni_layer.cpp)
-target_link_libraries(executorch_jni ${link_libraries})
-target_include_directories(
-  executorch_jni PRIVATE ${_common_include_directories}
-)
-target_compile_options(executorch_jni PUBLIC ${_common_compile_options})
 
 if(EXECUTORCH_BUILD_LLAMA_JNI)
-  set(LLAMA_RUNNER_PATH
-    ${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llama2/runner/libllama_runner.a
-  )
-  add_library(llama_runner STATIC IMPORTED)
-  set_property(
-    TARGET llama_runner PROPERTY IMPORTED_LOCATION ${LLAMA_RUNNER_PATH}
-  )
-
+  target_sources(executorch_jni PRIVATE jni/jni_layer_llama.cpp)
+  list(APPEND link_libraries llama_runner llava_runner)
+  target_compile_definitions(executorch_jni PUBLIC EXECUTORCH_BUILD_LLAMA_JNI=1)
   add_subdirectory(
     ${EXECUTORCH_ROOT}/examples/models/llava/runner
     ${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llava/runner
   )
 
-  set(CUSTOM_OPS_PATH
-    ${CMAKE_CURRENT_BINARY_DIR}/../../extension/llm/custom_ops/libcustom_ops.a
+  add_subdirectory(
+    ${EXECUTORCH_ROOT}/examples/models/llama2/runner
+    ${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llama2/runner
   )
-  add_library(custom_ops STATIC IMPORTED)
-  set_property(TARGET custom_ops PROPERTY IMPORTED_LOCATION ${CUSTOM_OPS_PATH})
-  target_link_options_shared_lib(custom_ops)
+endif()
 
-  target_link_options_shared_lib(quantized_ops_lib)
-
-  set(LLAMA_JNI_SRCS jni/jni_layer_llama.cpp)
-  add_library(executorch_llama_jni SHARED ${LLAMA_JNI_SRCS})
-  if(TARGET pthreadpool)
-    target_compile_definitions(executorch_llama_jni PRIVATE ET_USE_THREADPOOL=1)
-    target_include_directories(
-      executorch_llama_jni
-      PUBLIC
-        ${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/cpuinfo/include
-    )
-    target_include_directories(
-      executorch_llama_jni
-      PUBLIC
-        ${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/pthreadpool/include
-    )
-  endif()
+if(TARGET quantized_kernels)
+  list(APPEND link_libraries quantized_kernels quantized_ops_lib)
+endif()
+
+target_include_directories(
+  executorch_jni PRIVATE ${_common_include_directories}
+)
+
+target_compile_options(executorch_jni PUBLIC ${_common_compile_options})
+
+target_link_libraries(executorch_jni ${link_libraries})
+
+if(TARGET pthreadpool)
+  target_compile_definitions(executorch_jni PRIVATE ET_USE_THREADPOOL=1)
   target_include_directories(
-    executorch_llama_jni PRIVATE ${_common_include_directories}
-  )
-  target_link_libraries(
-    executorch_llama_jni
-    ${link_libraries}
-    llama_runner
-    llava_runner
-    custom_ops
-    cpublas
-    eigen_blas
-    quantized_kernels
-    quantized_ops_lib
+    executorch_jni
+    PUBLIC
+      ${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/cpuinfo/include
   )
-  target_compile_options(executorch_llama_jni PUBLIC ${_common_compile_options})
-  # link re2
-  set(ABSL_ENABLE_INSTALL ON)
-  set(_pic_flag ${CMAKE_POSITION_INDEPENDENT_CODE})
-  set(CMAKE_POSITION_INDEPENDENT_CODE ON)
-  add_subdirectory(
-    ${CMAKE_CURRENT_SOURCE_DIR}/../../extension/llm/third-party/abseil-cpp
-    ${CMAKE_CURRENT_BINARY_DIR}/abseil-cpp
-  )
-  add_subdirectory(
-    ${CMAKE_CURRENT_SOURCE_DIR}/../../extension/llm/third-party/re2
-    ${CMAKE_CURRENT_BINARY_DIR}/re2
+  target_include_directories(
+    executorch_jni
+    PUBLIC
+      ${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/pthreadpool/include
   )
-  set(CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag})
-  target_link_libraries(executorch_llama_jni re2::re2)
 endif()
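Since the optional pieces are now behind if() guards, a core-only (non-LLaMA) Android JNI build is just the same configure with the two feature flags left OFF. A hedged sketch, reusing the toolchain variables from the scripts above; this particular invocation is illustrative and not exercised by the diff:

    # Core-only build of the JNI extension: no custom ops, no llama/llava sources.
    cmake extension/android \
      -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
      -DANDROID_ABI="${ANDROID_ABI}" \
      -DANDROID_PLATFORM=android-23 \
      -DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
      -DCMAKE_BUILD_TYPE=Release \
      -B"${CMAKE_OUT}"/extension/android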

extension/android/jni/BUCK

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ fb_android_cxx_library(
         "-fexceptions",
         "-Wno-format",
     ],
-    soname = "libexecutorch_llama_jni.$(ext)",
+    soname = "libexecutorch.$(ext)",
     visibility = ["PUBLIC"],
    deps = [
         "//fbandroid/libraries/fbjni:fbjni",

extension/android/jni/jni_layer.cpp

Lines changed: 10 additions & 2 deletions
@@ -386,7 +386,15 @@ class ExecuTorchJni : public facebook::jni::HybridClass<ExecuTorchJni> {
 };
 } // namespace executorch::extension
 
+#ifdef EXECUTORCH_BUILD_LLAMA_JNI
+extern void register_natives_for_llama();
+#else
+// No op if we don't build llama
+void register_natives_for_llama() {}
+#endif
 JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
-  return facebook::jni::initialize(
-      vm, [] { executorch::extension::ExecuTorchJni::registerNatives(); });
+  return facebook::jni::initialize(vm, [] {
+    executorch::extension::ExecuTorchJni::registerNatives();
+    register_natives_for_llama();
+  });
 }

extension/android/jni/jni_layer_llama.cpp

Lines changed: 2 additions & 30 deletions
@@ -30,33 +30,6 @@
 #include <fbjni/ByteBuffer.h>
 #include <fbjni/fbjni.h>
 
-#ifdef __ANDROID__
-#include <android/log.h>
-
-// For Android, write to logcat
-void et_pal_emit_log_message(
-    et_timestamp_t timestamp,
-    et_pal_log_level_t level,
-    const char* filename,
-    const char* function,
-    size_t line,
-    const char* message,
-    size_t length) {
-  int android_log_level = ANDROID_LOG_UNKNOWN;
-  if (level == 'D') {
-    android_log_level = ANDROID_LOG_DEBUG;
-  } else if (level == 'I') {
-    android_log_level = ANDROID_LOG_INFO;
-  } else if (level == 'E') {
-    android_log_level = ANDROID_LOG_ERROR;
-  } else if (level == 'F') {
-    android_log_level = ANDROID_LOG_FATAL;
-  }
-
-  __android_log_print(android_log_level, "LLAMA", "%s", message);
-}
-#endif
-
 using namespace torch::executor;
 
 namespace executorch_jni {
@@ -300,7 +273,6 @@ class ExecuTorchLlamaJni
 
 } // namespace executorch_jni
 
-JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
-  return facebook::jni::initialize(
-      vm, [] { executorch_jni::ExecuTorchLlamaJni::registerNatives(); });
+void register_natives_for_llama() {
+  executorch_jni::ExecuTorchLlamaJni::registerNatives();
 }
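With jni_layer_llama.cpp no longer defining its own JNI_OnLoad (or the logcat hook), all natives are registered through the single entry point in jni_layer.cpp. A quick, hedged sanity check on the merged artifact, assuming a host GNU or LLVM nm is available, is to confirm it exports exactly one dynamic JNI_OnLoad symbol:

    # Expect exactly one dynamic JNI_OnLoad symbol in the merged library.
    nm -D "${CMAKE_OUT}/extension/android/libexecutorch_jni.so" | grep -c ' JNI_OnLoad'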

extension/android/src/main/java/org/pytorch/executorch/LlamaModule.java

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ public class LlamaModule {
     if (!NativeLoader.isInitialized()) {
       NativeLoader.init(new SystemDelegate());
     }
-    NativeLoader.loadLibrary("executorch_llama_jni");
+    NativeLoader.loadLibrary("executorch");
   }
 
   private final HybridData mHybridData;
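Because the Java side now calls NativeLoader.loadLibrary("executorch"), an app or AAR that still ships the old soname will fail to resolve the native library. A hedged way to check a previously built archive; the unzip/grep invocation is illustrative and not part of this change:

    # A stale AAR will still list the retired libexecutorch_llama_jni.so.
    unzip -l executorch-llama.aar | grep -E 'libexecutorch(_llama_jni)?\.so'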
