
Commit eff9669

adapt to test-backend-ops.cpp

1 parent 180ab5f

File tree

9 files changed: +242 / -43 lines

README-qnn.md

Lines changed: 4 additions & 2 deletions

```diff
@@ -93,12 +93,14 @@ Any **mainstream** Android phone based on Qualcomm's mobile SoC should be suppor
 ### II. Build llama.cpp + QNN backend
 
 
-Please refer to [project kantv](https://github.com/zhouwg/kantv) firstly.
+Please refer to [project kantv](https://github.com/zhouwg/kantv)
 
 
-A small and standalone Android example(or re-use [the existing Android example in llama.cpp](https://github.com/ggerganov/llama.cpp/tree/master/examples/llama.android)) for purpose of facilitate community developers to participate in develop/verify QNN backend.
+or
 
 
+using [test-backend-ops.cpp](tests/ggml-qnn) to verify it on Qualcomm mobile SoC based Android phone
+
 ### III. Run the inference on Qualcomm mobile SoC based Android phone
```
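For reference, a device-side run might look like the sketch below. The binary name `ggml-qnn-test` and its location come from `tests/ggml-qnn/build-ggml-qnn.sh` in this commit; the `test -o ADD` command line is assumed from upstream `test-backend-ops.cpp` and may differ in this adapted copy.

```bash
# sketch: push the test binary plus the QNN CPU-backend library to the phone,
# then run a single-op correctness test from an adb shell
adb push ./tests/ggml-qnn/ggml-qnn-test /data/local/tmp/
adb push ${QNN_SDK_PATH}/lib/aarch64-android/libQnnCpu.so /data/local/tmp/
adb shell "cd /data/local/tmp && LD_LIBRARY_PATH=/data/local/tmp ./ggml-qnn-test test -o ADD"
```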

ggml-backend.c

Lines changed: 5 additions & 0 deletions

```diff
@@ -445,6 +445,11 @@ GGML_CALL static void ggml_backend_registry_init(void) {
     extern GGML_CALL void ggml_backend_kompute_reg_devices(void);
     ggml_backend_kompute_reg_devices();
 #endif
+
+#ifdef GGML_USE_QNN
+    extern GGML_CALL int ggml_backend_qnn_reg_devices(void);
+    ggml_backend_qnn_reg_devices();
+#endif
 }
 
 GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) {
```
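A minimal sketch of what this registration buys: once `ggml_backend_qnn_reg_devices()` has run, the QNN devices should be discoverable through the generic backend registry like any other backend. The registry calls below (`ggml_backend_reg_get_count()`, `ggml_backend_reg_get_name()`) are taken from `ggml-backend.h` of this llama.cpp generation; treat the exact names as assumptions.

```cpp
// sketch: list every backend the lazily-initialized registry knows about,
// which with GGML_USE_QNN should include the QNN devices
#include <cstdio>
#include "ggml-backend.h"

int main() {
    size_t n = ggml_backend_reg_get_count();   // first call triggers ggml_backend_registry_init()
    for (size_t i = 0; i < n; i++) {
        printf("backend #%zu: %s\n", i, ggml_backend_reg_get_name(i));
    }
    return 0;
}
```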

ggml-qnn.cpp

Lines changed: 34 additions & 36 deletions

```diff
@@ -1176,7 +1176,6 @@ static void qnn_buf_buffer_put(qnn_buf_t * fifo, buf_element_t * element) {
     fifo->qnn_buf_size++;
     fifo->qnn_buf_data_size += element->size;
 
-    LOGJ("put:index %d, fifo->size is %d, self->buffer_pool_num_free %d\n", element->id, fifo->qnn_buf_size, fifo->buffer_pool_num_free);
     pthread_cond_signal (&fifo->not_empty);
 
     pthread_mutex_unlock (&fifo->mutex);
@@ -1426,9 +1425,12 @@ static void ggml_qnn_log_internal(ggml_log_level level, const char * file, const
     int len = vsnprintf(s_ggml_qnn_log_internal_buf + len_prefix, GGML_QNN_LOGBUF_LEN - len_prefix, format, args);
     if (len < (GGML_QNN_LOGBUF_LEN - len_prefix)) {
 #if (defined __ANDROID__) || (defined ANDROID)
-        __android_log_print(level, "ggml-qnn", "%s", s_ggml_qnn_log_internal_buf);
+        //for Android APP
+        __android_log_print(level, "ggml-qnn", "%s\n", s_ggml_qnn_log_internal_buf);
+        //for Android terminal
+        printf("%s\n", s_ggml_qnn_log_internal_buf);
 #else
-        printf("%s", buffer); //Qualcomm's QNN could running on Windows over ARM(aka WoA)
+        printf("%s\n", s_ggml_qnn_log_internal_buf);
 #endif
     }
     va_end(args);
```
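The intent of the change above is a log line that is visible both in logcat (Android APP) and on stdout (adb-shell terminal). A condensed sketch of that pattern, independent of the commit's exact macros:

```cpp
// sketch: format the message once, then emit it to logcat (when on Android)
// and to stdout (terminal sessions, or non-Android targets such as WoA)
#include <cstdarg>
#include <cstdio>
#if defined(__ANDROID__)
#include <android/log.h>
#endif

static void qnn_log_example(const char * format, ...) {
    char buf[4096];
    va_list args;
    va_start(args, format);
    vsnprintf(buf, sizeof(buf), format, args);
    va_end(args);
#if defined(__ANDROID__)
    __android_log_print(ANDROID_LOG_INFO, "ggml-qnn", "%s\n", buf);  // Android APP (logcat)
#endif
    printf("%s\n", buf);                                             // Android terminal / other OS
}
```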
```diff
@@ -2125,9 +2127,9 @@ int qnn_instance::load_system() {
 
     _qnn_interface.qnn_system_context_create(&_qnn_system_handle);
     if (nullptr == _qnn_system_handle) {
-        LOGW("can not create QNN system contenxt\n");
+        QNN_LOG_WARN("can not create QNN system contenxt\n");
     } else {
-        QNN_LOG_DEBUG("initialize qnn system successfully\n");
+        QNN_LOG_INFO("initialize qnn system successfully\n");
     }
 
     return 0;
```
```diff
@@ -2494,24 +2496,23 @@ static bool ggml_qnn_can_handle_op(const struct ggml_tensor * src0, const struct
     if (dst->op == GGML_OP_ADD) {
         return (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) &&
                (src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16) &&
-               (dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16) && ((ne00 > 1 && ne01 > 1 && ne10 > 1 && ne11 > 1)) &&
-               (src0->rank == src1->rank);
+               (dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16) && ((ne00 > 1 && ne01 > 1 && ne10 > 1 && ne11 > 1));
 
     }
 
     if (dst->op == GGML_OP_MUL_MAT) {
 #if 1 // log output have significant effect to performance but useful during development stage
         QNN_LOG_DEBUG("GGML_OP_MUL_MAT");
-        QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                     src0->name, src0->rank,
+        QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                     src0->name,
                      src0->type, ggml_type_name(src0->type), src0->ne[0], src0->ne[1], src0->ne[2],
                      src0->nb[0], src0->nb[1], src0->nb[2]);
-        QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                     src1->name, src1->rank,
+        QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                     src1->name,
                      src1->type, ggml_type_name(src1->type), src1->ne[0], src1->ne[1], src1->ne[2],
                      src1->nb[0], src1->nb[1], src1->nb[2]);
-        QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                     dst->name, dst->rank,
+        QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                     dst->name,
                      dst->type, ggml_type_name(dst->type), dst->ne[0], dst->ne[1], dst->ne[2], dst->nb[0],
                      dst->nb[1], dst->nb[2]);
 #endif
@@ -2576,18 +2577,18 @@ static void ggml_qnn_add(const ggml_tensor * src0, const ggml_tensor * src1, ggm
     QNN_INTERFACE_VER_TYPE qnn_raw_interface = ctx->raw_interface;
 
     n_begin_time = ggml_time_us();
-#if 0 //it works fine with whisper.cpp and llama.cpp. comment them because focus on mulmat in llama.cpp inference since 04-23-2024
+#if 0
     QNN_LOG_DEBUG("call %s\n", __func__);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src0->name, src0->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src0->name,
                  src0->type, ggml_type_name(src0->type), src0->ne[0], src0->ne[1], src0->ne[2],
                  src0->nb[0], src0->nb[1], src0->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src1->name, src1->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src1->name,
                  src1->type, ggml_type_name(src1->type), src1->ne[0], src1->ne[1], src1->ne[2],
                  src1->nb[0], src1->nb[1], src1->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 dst->name, dst->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 dst->name,
                  dst->type, ggml_type_name(dst->type), dst->ne[0], dst->ne[1], dst->ne[2], dst->nb[0],
                  dst->nb[1], dst->nb[2]);
     QNN_LOG_DEBUG("%d, %d, %d, %d", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
@@ -2793,16 +2794,16 @@ static void ggml_qnn_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1,
 
     n_begin_time = ggml_time_us();
     QNN_LOG_DEBUG("call %s\n", __func__);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src0->name, src0->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src0->name,
                  src0->type, ggml_type_name(src0->type), src0->ne[0], src0->ne[1], src0->ne[2],
                  src0->nb[0], src0->nb[1], src0->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src1->name, src1->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src1->name,
                  src1->type, ggml_type_name(src1->type), src1->ne[0], src1->ne[1], src1->ne[2],
                  src1->nb[0], src1->nb[1], src1->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 dst->name, dst->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 dst->name,
                  dst->type, ggml_type_name(dst->type), dst->ne[0], dst->ne[1], dst->ne[2], dst->nb[0],
                  dst->nb[1], dst->nb[2]);
     QNN_LOG_DEBUG("%d, %d, %d, %d", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
@@ -3000,16 +3001,16 @@ static void ggml_qnn_hanlde_op(const enum ggml_op ggmlop, const ggml_tensor * sr
 
     n_begin_time = ggml_time_us();
     QNN_LOG_DEBUG("call %s\n", __func__);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src0->name, src0->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src0->name,
                  src0->type, ggml_type_name(src0->type), src0->ne[0], src0->ne[1], src0->ne[2],
                  src0->nb[0], src0->nb[1], src0->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 src1->name, src1->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 src1->name,
                  src1->type, ggml_type_name(src1->type), src1->ne[0], src1->ne[1], src1->ne[2],
                  src1->nb[0], src1->nb[1], src1->nb[2]);
-    QNN_LOG_INFO("%15s: rank = %d, type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
-                 dst->name, dst->rank,
+    QNN_LOG_INFO("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi)\n",
+                 dst->name,
                  dst->type, ggml_type_name(dst->type), dst->ne[0], dst->ne[1], dst->ne[2], dst->nb[0],
                  dst->nb[1], dst->nb[2]);
     QNN_LOG_DEBUG("%d, %d, %d, %d", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
```
```diff
@@ -4396,7 +4397,6 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_
 }
 
 
-#if 0 //replaced with ggml_status ggml_backend_qnn_graph_compute_multithread
 static void * ggml_graph_compute_thread(void * data) {
     struct ggml_compute_state * state = (struct ggml_compute_state *) data;
 
@@ -4531,7 +4531,6 @@ static void * ggml_graph_compute_thread(void * data) {
 
     return 0;
 }
-#endif
 
 
 static ggml_status ggml_backend_qnn_graph_compute_multithread(ggml_backend_t backend, ggml_cgraph * cgraph) {
@@ -4830,8 +4829,7 @@ ggml_backend_t ggml_backend_qnn_init(size_t device, const char * qnn_lib_path) {
 }
 
 
-extern "C" int ggml_backend_qnn_reg_devices();
-
+extern "C" int ggml_backend_qnn_reg_devices(void);
 
 int ggml_backend_qnn_reg_devices() {
     for (size_t idx = 0; idx < GGML_QNN_MAX_DEVICES; idx++) {
```

ggml-qnn.h

Lines changed: 1 addition & 5 deletions

```diff
@@ -18,7 +18,7 @@ enum QNNBackend {
     QNN_HTP,
 };
 
-GGML_API int ggml_backend_qnn_reg_devices();
+GGML_API int ggml_backend_qnn_reg_devices(void);
 
 /**
  *
@@ -39,10 +39,6 @@ GGML_API void ggml_backend_qnn_get_device_description(int device, char
 
 GGML_API ggml_backend_buffer_type_t ggml_backend_qnn_buffer_type(size_t dev_num);
 
-// TODO: this is a temporary API, should be removed in the future
-GGML_API bool ggml_qnn_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
-
-
 #ifdef __cplusplus
 }
 #endif
```
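The `()` → `(void)` change is not cosmetic: this prototype is consumed from `ggml-backend.c`, which is compiled as C, and in C the two spellings mean different things (in C++ they are equivalent):

```c
int f();     /* C: unspecified parameter list -- calls are not type-checked */
int g(void); /* C: exactly zero parameters    -- mismatched calls rejected  */
```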

tests/ggml-qnn/.gitignore

Lines changed: 3 additions & 0 deletions (new file)

```
out
android-ndk-r26c*
test-qnn*
```

tests/ggml-qnn/CMakeLists.txt

Lines changed: 80 additions & 0 deletions (new file)

```cmake
cmake_minimum_required(VERSION 3.22.1)
project(ggml-qnn)

set(CMAKE_VERBOSE_MAKEFILE on)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

set(TARGET_SNAPDRAGON_8_GEN3 OFF)

set(LLAMACPP_SRC_PATH ${PROJECT_ROOT_PATH})
set(QNN_INC_PATH ${QNN_SDK_PATH}/include/QNN)
set(QNN_LIB_PATH ${QNN_SDK_PATH}/lib/aarch64-android)

include_directories(${QNN_INC_PATH})
include_directories(${LLAMACPP_SRC_PATH})
include_directories(${LLAMACPP_SRC_PATH}/common)

set(SOURCE_FILES
        ${LLAMACPP_SRC_PATH}/ggml.c
        ${LLAMACPP_SRC_PATH}/ggml-alloc.c
        ${LLAMACPP_SRC_PATH}/ggml-backend.c
        ${LLAMACPP_SRC_PATH}/ggml-quants.c
        ${LLAMACPP_SRC_PATH}/ggml-qnn.cpp
        ${LLAMACPP_SRC_PATH}/tests/test-backend-ops.cpp
)


message("PROJECT_ROOT_PATH : ${PROJECT_ROOT_PATH}")
message("LLAMACPP_SRC_PATH : ${LLAMACPP_SRC_PATH}")
message("QNN_SDK_PATH      : ${QNN_SDK_PATH}")
message("QNN_INC_PATH      : ${QNN_INC_PATH}")
message("QNN_LIB_PATH      : ${QNN_LIB_PATH}")
message("target name       : ${TARGET_NAME}")


add_definitions(-DTARGET_ANDROID)
add_definitions(-D__ARM_NEON)
add_definitions(-DGGML_USE_QNN)

add_definitions(-DNDEBUG)
add_definitions(-O3)

if (TARGET_SNAPDRAGON_8_GEN3)
    add_definitions(-march=armv8.7-a)
    add_definitions(-mcpu=cortex-x1)
    add_definitions(-mtune=cortex-x1)
else()
    # the below build optimization might be works well on ALL mainstream Android phones
    add_definitions(-mcpu=cortex-a72)
endif()

add_compile_options("-Wall" "-Wno-sign-compare")

if (GGML_JNI_QNN)
    file(GLOB allPrebuiltQNNLibs "${QNN_LIB_PATH}/libQnn*.so")
    #file(COPY ${allPrebuiltQNNLibs} DESTINATION ${PREBUILT_LIB_PATH}/ )
endif()

find_library(LOG_LIB log)

add_library(QNNCpu
        SHARED
        IMPORTED)

set_target_properties(QNNCpu
        PROPERTIES
        IMPORTED_LOCATION
        ${PREBUILT_LIB_PATH}/libQnnCpu.so)

link_libraries(${LOG_LIB} android)

add_executable(${TARGET_NAME}
        ${SOURCE_FILES}
)
```
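One caveat: the `QNNCpu` imported target above resolves `libQnnCpu.so` through `${PREBUILT_LIB_PATH}`, which neither this file nor `build-ggml-qnn.sh` below defines. A hypothetical configure line that supplies it explicitly (reusing the value that `QNN_LIB_PATH` would compute) might look like:

```bash
# assumption: PREBUILT_LIB_PATH must be passed in, pointing at the QNN SDK's
# aarch64-android libraries; other variables mirror build-ggml-qnn.sh
cmake -H. -B./out/arm64-v8a \
    -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
    -DANDROID_ABI=arm64-v8a \
    -DPROJECT_ROOT_PATH=~/github/llama.cpp \
    -DQNN_SDK_PATH=/opt/qcom/aistack/qnn/2.20.0.240223 \
    -DPREBUILT_LIB_PATH=/opt/qcom/aistack/qnn/2.20.0.240223/lib/aarch64-android \
    -DTARGET_NAME=ggml-qnn-test
```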

tests/ggml-qnn/build-ggml-qnn.sh

Lines changed: 89 additions & 0 deletions (new file)

```bash
#!/bin/bash

set -e

#modify following lines to adapt to local dev envs
PROJECT_ROOT_PATH=~/github/llama.cpp/
#https://qpm.qualcomm.com/#/main/tools/details/qualcomm_ai_engine_direct
#https://developer.qualcomm.com/software/hexagon-dsp-sdk/tools
QNN_SDK_PATH=/opt/qcom/aistack/qnn/2.20.0.240223/


ANDROID_NDK=`pwd`/android-ndk-r26c
TARGET=ggml-qnn-test


function dump_vars()
{
    echo -e "PROJECT_ROOT_PATH: ${PROJECT_ROOT_PATH}"
    echo -e "ANDROID_NDK:       ${ANDROID_NDK}"
    echo -e "QNN_SDK_PATH:      ${QNN_SDK_PATH}"
}


function show_pwd()
{
    echo -e "current working path:$(pwd)\n"
}


function check_and_download_ndk()
{
    is_android_ndk_exist=1

    if [ ! -d ${ANDROID_NDK} ]; then
        is_android_ndk_exist=0
    fi

    if [ ! -f ${ANDROID_NDK}/build/cmake/android.toolchain.cmake ]; then
        is_android_ndk_exist=0
    fi

    if [ ${is_android_ndk_exist} -eq 0 ]; then

        if [ ! -f android-ndk-r26c-linux.zip ]; then
            wget --no-config --quiet --show-progress -O android-ndk-r26c-linux.zip https://dl.google.com/android/repository/android-ndk-r26c-linux.zip
        fi

        unzip android-ndk-r26c-linux.zip

        if [ $? -ne 0 ]; then
            printf "failed to download android ndk to %s \n" "${ANDROID_NDK}"
            exit 1
        fi

        printf "android ndk saved to ${ANDROID_NDK} \n\n"
    else
        printf "android ndk already exist:${ANDROID_NDK} \n\n"
    fi
}


function build_arm64
{
    cmake -H. -B./out/arm64-v8a -DPROJECT_ROOT_PATH=${PROJECT_ROOT_PATH} -DTARGET_NAME=${TARGET} -DCMAKE_BUILD_TYPE=${PROJECT_BUILD_TYPE} -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=${ANDROID_PLATFORM} -DANDROID_NDK=${ANDROID_NDK} -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DQNN_SDK_PATH=${QNN_SDK_PATH}

    cd ./out/arm64-v8a
    make

    ls -lah ${TARGET}
    /bin/cp ${TARGET} ../../
    cd -
}


function remove_temp_dir()
{
    if [ -d out ]; then
        echo "remove out directory in `pwd`"
        rm -rf out
    fi
}



show_pwd
check_and_download_ndk
dump_vars
remove_temp_dir
build_arm64
```
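A usage sketch for the script above: on first run it downloads NDK r26c into the current directory, then configures, builds, and copies the finished `ggml-qnn-test` binary back beside the script, ready for the adb push shown earlier.

```bash
# assumption: QNN SDK already installed at the path hardcoded in the script
cd tests/ggml-qnn
./build-ggml-qnn.sh
ls -lah ggml-qnn-test   # the freshly built test binary
```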
