File tree Expand file tree Collapse file tree 2 files changed +6
-1
lines changed
examples/demo-apps/android/LlamaDemo Expand file tree Collapse file tree 2 files changed +6
-1
lines changed Original file line number Diff line number Diff line change @@ -34,6 +34,9 @@ cmake examples/models/llama2 \
34
34
-DANDROID_ABI="$ANDROID_ABI" \
35
35
-DCMAKE_INSTALL_PREFIX="${CMAKE_OUT}" \
36
36
-DEXECUTORCH_USE_TIKTOKEN="${EXECUTORCH_USE_TIKTOKEN}" \
37
+ -DEXECUTORCH_BUILD_CUSTOM=ON \
38
+ -DEXECUTORCH_BUILD_OPTIMIZED=ON \
39
+ -DEXECUTORCH_BUILD_XNNPACK=ON \
37
40
-DCMAKE_BUILD_TYPE=Release \
38
41
-B"${CMAKE_OUT}"/examples/models/llama2
39
42
Original file line number Diff line number Diff line change @@ -68,6 +68,8 @@ if(EXECUTORCH_BUILD_LLAMA_JNI)
68
68
set_property(TARGET custom_ops PROPERTY IMPORTED_LOCATION ${CUSTOM_OPS_PATH})
69
69
target_link_options_shared_lib(custom_ops)
70
70
71
+ target_link_options_shared_lib(quantized_ops_lib)
72
+
71
73
if(TARGET pthreadpool)
72
74
set(LLAMA_JNI_SRCS jni/jni_layer_llama.cpp ../../backends/xnnpack/threadpool/cpuinfo_utils.cpp)
73
75
else()
@@ -83,7 +85,7 @@ if(EXECUTORCH_BUILD_LLAMA_JNI)
83
85
endif()
84
86
target_include_directories(executorch_llama_jni PRIVATE ${_common_include_directories})
85
87
target_link_libraries(executorch_llama_jni ${link_libraries} llama_runner
86
- custom_ops cpublas eigen_blas)
88
+ custom_ops cpublas eigen_blas quantized_kernels quantized_ops_lib)
87
89
target_compile_options(executorch_llama_jni PUBLIC ${_common_compile_options})
88
90
if(EXECUTORCH_USE_TIKTOKEN)
89
91
set(ABSL_ENABLE_INSTALL ON)
You can’t perform that action at this time.
0 commit comments