Skip to content

Android refactor cmake build #5204

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 29 commits into from
Sep 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit. Hold shift + click to select a range
7271dff
try to merge jni
kirklandsign Aug 31, 2024
c81a8f1
new activity!
kirklandsign Aug 31, 2024
b406fbb
remove unused
kirklandsign Aug 31, 2024
11b3d6a
Remove API forwardOnes
kirklandsign Aug 31, 2024
08c7664
Merge remote-tracking branch 'origin/main' into experiments-jni
kirklandsign Sep 5, 2024
4b3437d
Merge remote-tracking branch 'origin/main' into experiments-jni
kirklandsign Sep 5, 2024
4259eb1
Remove managed_tensors
kirklandsign Sep 5, 2024
3516aae
[Android] Remove forwardOnes
kirklandsign Sep 6, 2024
0cc883e
fix build
kirklandsign Sep 6, 2024
5947dd5
Merge remote-tracking branch 'origin/main' into experiments-jni
kirklandsign Sep 6, 2024
7c369a3
copy qnn part
kirklandsign Sep 6, 2024
bab4d66
load method first
kirklandsign Sep 6, 2024
1d6a86e
Need to load method
kirklandsign Sep 6, 2024
38ae11c
linter
kirklandsign Sep 6, 2024
ff1fe3c
Merge branch 'android-api-change' into experiments-jni
kirklandsign Sep 6, 2024
7864c62
Merge remote-tracking branch 'origin/main' into experiments-jni
kirklandsign Sep 6, 2024
e384d07
add qnn
kirklandsign Sep 7, 2024
608e020
Merge remote-tracking branch 'origin/main' into experiments-jni
kirklandsign Sep 8, 2024
e688c2e
Merge branch 'experiments-jni' of github.com:kirklandsign/executorch …
kirklandsign Sep 8, 2024
be3abec
Merge remote-tracking branch 'origin/experiments-jni' into experiment…
kirklandsign Sep 8, 2024
2b1d2e7
Update Java Activity part
kirklandsign Sep 9, 2024
5152950
update cmake
kirklandsign Sep 9, 2024
19cec4e
add a way to build non llm
kirklandsign Sep 9, 2024
0265e6b
Remove reference libexecutorch_llama_jni.so
kirklandsign Sep 9, 2024
7163807
Remove app related stuff for now
kirklandsign Sep 9, 2024
232e746
linter
kirklandsign Sep 9, 2024
50920d5
linter
kirklandsign Sep 9, 2024
320164a
Merge branch 'experiments-jni' of github.com:pytorch/executorch into …
kirklandsign Sep 10, 2024
957d1f1
Link custom_ops
kirklandsign Sep 10, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 2 additions & 15 deletions build/build_android_llm_demo.sh
Original file line number Diff line number Diff line change
Expand Up @@ -54,27 +54,14 @@ build_android_native_library() {
fi
cmake --build "${CMAKE_OUT}" -j "${CMAKE_JOBS}" --target install --config Release

cmake examples/models/llama2 \
-DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
-DANDROID_ABI="$ANDROID_ABI" \
-DANDROID_PLATFORM=android-23 \
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-DEXECUTORCH_BUILD_XNNPACK=ON \
-DCMAKE_BUILD_TYPE=Release \
-B"${CMAKE_OUT}"/examples/models/llama2

cmake --build "${CMAKE_OUT}"/examples/models/llama2 -j "${CMAKE_JOBS}" --config Release


cmake extension/android \
-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
-DANDROID_ABI="${ANDROID_ABI}" \
-DANDROID_PLATFORM=android-23 \
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
-DEXECUTORCH_ENABLE_LOGGING=ON \
-DEXECUTORCH_LOG_LEVEL=Info \
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
-DEXECUTORCH_BUILD_LLAMA_JNI=ON \
-DCMAKE_BUILD_TYPE=Release \
-B"${CMAKE_OUT}"/extension/android
Expand Down Expand Up @@ -110,7 +97,7 @@ build_aar() {
find jni -type f -name "libexecutorch_jni.so" -exec bash -c 'mv "$1" "${1/_jni/}"' bash {} \;
# Zip all necessary files into the AAR file
zip -r executorch.aar libs jni/*/libexecutorch.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
zip -r executorch-llama.aar libs jni/*/libexecutorch_llama_jni.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
zip -r executorch-llama.aar libs jni/*/libexecutorch.so jni/*/libqnn*.so jni/*/libQnn*.so AndroidManifest.xml
popd
}

Expand Down
4 changes: 3 additions & 1 deletion examples/demo-apps/android/LlamaDemo/setup-with-qnn.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ cmake examples/models/llama2 \
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
-DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
-DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-DCMAKE_BUILD_TYPE=Release \
-B"${CMAKE_OUT}"/examples/models/llama2

Expand All @@ -47,6 +48,7 @@ cmake extension/android \
-DANDROID_ABI="${ANDROID_ABI}" \
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
-DEXECUTORCH_BUILD_LLAMA_JNI=ON \
-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
-DCMAKE_BUILD_TYPE=Release \
-B"${CMAKE_OUT}"/extension/android
Expand All @@ -59,7 +61,7 @@ mkdir -p "${JNI_LIBS_PATH}/${ANDROID_ABI}"
BUILD_AAR_DIR="$(mktemp -d)"
mkdir -p "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}" "${BUILD_AAR_DIR}/libs"
JNI_LIBS_PATH="${BUILD_AAR_DIR}/jni"
cp "${CMAKE_OUT}"/extension/android/libexecutorch_llama_jni.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
cp "${CMAKE_OUT}"/extension/android/libexecutorch_jni.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/libexecutorch_jni.so"
cp "${CMAKE_OUT}"/lib/libqnn_executorch_backend.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
cp "${QNN_SDK_ROOT}"/lib/aarch64-android/libQnnHtp.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
cp "${QNN_SDK_ROOT}"/lib/aarch64-android/libQnnSystem.so "${JNI_LIBS_PATH}/${ANDROID_ABI}/"
Expand Down
2 changes: 1 addition & 1 deletion examples/demo-apps/android/LlamaDemo/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ cmake --build "${CMAKE_OUT}"/extension/android -j "${CMAKE_JOBS}" --config Relea

BUILD_AAR_DIR="$(mktemp -d)"
mkdir -p "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}" "${BUILD_AAR_DIR}/libs"
cp "${CMAKE_OUT}"/extension/android/libexecutorch_llama_jni.so "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}"
cp "${CMAKE_OUT}"/extension/android/libexecutorch_jni.so "${BUILD_AAR_DIR}/jni/${ANDROID_ABI}/libexecutorch.so"
cp extension/android/build/libs/executorch.jar "${BUILD_AAR_DIR}/libs"
echo \<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" \
package=\"org.pytorch.executorch\"\> \
Expand Down
99 changes: 37 additions & 62 deletions extension/android/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ project(executorch_jni)

if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
# Can't set to 11 due to executor_runner.cpp make_unique
endif()

if(NOT ANDROID)
Expand Down Expand Up @@ -71,78 +70,54 @@ if(TARGET vulkan_backend)
list(APPEND link_libraries vulkan_backend)
endif()

if(EXECUTORCH_BUILD_KERNELS_CUSTOM)
add_subdirectory(
${EXECUTORCH_ROOT}/extension/llm/custom_ops
${CMAKE_CURRENT_BINARY_DIR}/../../extension/llm/custom_ops
)
list(APPEND link_libraries custom_ops)
target_link_options_shared_lib(custom_ops)
endif()

add_library(executorch_jni SHARED jni/jni_layer.cpp)
target_link_libraries(executorch_jni ${link_libraries})
target_include_directories(
executorch_jni PRIVATE ${_common_include_directories}
)
target_compile_options(executorch_jni PUBLIC ${_common_compile_options})

if(EXECUTORCH_BUILD_LLAMA_JNI)
set(LLAMA_RUNNER_PATH
${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llama2/runner/libllama_runner.a
)
add_library(llama_runner STATIC IMPORTED)
set_property(
TARGET llama_runner PROPERTY IMPORTED_LOCATION ${LLAMA_RUNNER_PATH}
)

target_sources(executorch_jni PRIVATE jni/jni_layer_llama.cpp)
list(APPEND link_libraries llama_runner llava_runner)
target_compile_definitions(executorch_jni PUBLIC EXECUTORCH_BUILD_LLAMA_JNI=1)
add_subdirectory(
${EXECUTORCH_ROOT}/examples/models/llava/runner
${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llava/runner
)

set(CUSTOM_OPS_PATH
${CMAKE_CURRENT_BINARY_DIR}/../../extension/llm/custom_ops/libcustom_ops.a
add_subdirectory(
${EXECUTORCH_ROOT}/examples/models/llama2/runner
${CMAKE_CURRENT_BINARY_DIR}/../../examples/models/llama2/runner
)
add_library(custom_ops STATIC IMPORTED)
set_property(TARGET custom_ops PROPERTY IMPORTED_LOCATION ${CUSTOM_OPS_PATH})
target_link_options_shared_lib(custom_ops)
endif()

target_link_options_shared_lib(quantized_ops_lib)

set(LLAMA_JNI_SRCS jni/jni_layer_llama.cpp)
add_library(executorch_llama_jni SHARED ${LLAMA_JNI_SRCS})
if(TARGET pthreadpool)
target_compile_definitions(executorch_llama_jni PRIVATE ET_USE_THREADPOOL=1)
target_include_directories(
executorch_llama_jni
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/cpuinfo/include
)
target_include_directories(
executorch_llama_jni
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/pthreadpool/include
)
endif()
if(TARGET quantized_kernels)
list(APPEND link_libraries quantized_kernels quantized_ops_lib)
endif()

target_include_directories(
executorch_jni PRIVATE ${_common_include_directories}
)

target_compile_options(executorch_jni PUBLIC ${_common_compile_options})

target_link_libraries(executorch_jni ${link_libraries})

if(TARGET pthreadpool)
target_compile_definitions(executorch_jni PRIVATE ET_USE_THREADPOOL=1)
target_include_directories(
executorch_llama_jni PRIVATE ${_common_include_directories}
)
target_link_libraries(
executorch_llama_jni
${link_libraries}
llama_runner
llava_runner
custom_ops
cpublas
eigen_blas
quantized_kernels
quantized_ops_lib
executorch_jni
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/cpuinfo/include
)
target_compile_options(executorch_llama_jni PUBLIC ${_common_compile_options})
# link re2
set(ABSL_ENABLE_INSTALL ON)
set(_pic_flag ${CMAKE_POSITION_INDEPENDENT_CODE})
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_subdirectory(
${CMAKE_CURRENT_SOURCE_DIR}/../../extension/llm/third-party/abseil-cpp
${CMAKE_CURRENT_BINARY_DIR}/abseil-cpp
)
add_subdirectory(
${CMAKE_CURRENT_SOURCE_DIR}/../../extension/llm/third-party/re2
${CMAKE_CURRENT_BINARY_DIR}/re2
target_include_directories(
executorch_jni
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/../../backends/xnnpack/third-party/pthreadpool/include
)
set(CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag})
target_link_libraries(executorch_llama_jni re2::re2)
endif()
2 changes: 1 addition & 1 deletion extension/android/jni/BUCK
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ fb_android_cxx_library(
"-fexceptions",
"-Wno-format",
],
soname = "libexecutorch_llama_jni.$(ext)",
soname = "libexecutorch.$(ext)",
visibility = ["PUBLIC"],
deps = [
"//fbandroid/libraries/fbjni:fbjni",
Expand Down
12 changes: 10 additions & 2 deletions extension/android/jni/jni_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -386,7 +386,15 @@ class ExecuTorchJni : public facebook::jni::HybridClass<ExecuTorchJni> {
};
} // namespace executorch::extension

#ifdef EXECUTORCH_BUILD_LLAMA_JNI
extern void register_natives_for_llama();
#else
// No op if we don't build llama
void register_natives_for_llama() {}
#endif
JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
return facebook::jni::initialize(
vm, [] { executorch::extension::ExecuTorchJni::registerNatives(); });
return facebook::jni::initialize(vm, [] {
executorch::extension::ExecuTorchJni::registerNatives();
register_natives_for_llama();
});
}
32 changes: 2 additions & 30 deletions extension/android/jni/jni_layer_llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,33 +30,6 @@
#include <fbjni/ByteBuffer.h>
#include <fbjni/fbjni.h>

#ifdef __ANDROID__
#include <android/log.h>

// For Android, write to logcat
void et_pal_emit_log_message(
et_timestamp_t timestamp,
et_pal_log_level_t level,
const char* filename,
const char* function,
size_t line,
const char* message,
size_t length) {
int android_log_level = ANDROID_LOG_UNKNOWN;
if (level == 'D') {
android_log_level = ANDROID_LOG_DEBUG;
} else if (level == 'I') {
android_log_level = ANDROID_LOG_INFO;
} else if (level == 'E') {
android_log_level = ANDROID_LOG_ERROR;
} else if (level == 'F') {
android_log_level = ANDROID_LOG_FATAL;
}

__android_log_print(android_log_level, "LLAMA", "%s", message);
}
#endif

using namespace torch::executor;

namespace executorch_jni {
Expand Down Expand Up @@ -291,7 +264,6 @@ class ExecuTorchLlamaJni

} // namespace executorch_jni

JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
return facebook::jni::initialize(
vm, [] { executorch_jni::ExecuTorchLlamaJni::registerNatives(); });
void register_natives_for_llama() {
executorch_jni::ExecuTorchLlamaJni::registerNatives();
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ public class LlamaModule {
if (!NativeLoader.isInitialized()) {
NativeLoader.init(new SystemDelegate());
}
NativeLoader.loadLibrary("executorch_llama_jni");
NativeLoader.loadLibrary("executorch");
}

private final HybridData mHybridData;
Expand Down
Loading