
Commit a47bbf3

ggml-qnn: refine code format to make PR reviewers happy
1 parent b852d74 commit a47bbf3


ggml/src/ggml-qnn/ggml-qnn.cpp

Lines changed: 38 additions & 39 deletions
@@ -1286,8 +1286,8 @@ static Qnn_Tensor_t * ggml_qnn_create_compute_tensor(const ggml_tensor * tensor)
     qnn_data_type = qnn_datatype_from_ggml_datatype(tensor->type);
     Qnn_Tensor_t * p_qnn_tensor = ggml_qnn_create_general_tensor(tensor, nullptr,
                                                                  qnn_tensor_type, qnn_data_type,
-                                                      ggml_n_dims(tensor), dimensions,
-                                                      nullptr, 0);
+                                                                 ggml_n_dims(tensor), dimensions,
+                                                                 nullptr, 0);
 
     return p_qnn_tensor;
 }
@@ -1526,7 +1526,7 @@ class qnn_instance {
     using BackendIdType = decltype(QnnInterface_t{}.backendId);
 
     explicit qnn_instance(const std::string & lib_path, const std::string & backend_name,
-            const std::string & model_name) :
+                          const std::string & model_name) :
         _lib_path(std::move(lib_path)),
         _backend_name(std::move(backend_name)),
         _model_name(std::move(model_name)) {};
@@ -1608,8 +1608,7 @@ class qnn_instance {
         if (_qnn_rpc_pollingtime > 0) {
             QnnHtpPerfInfrastructure_PowerConfig_t rpc_pollingtime;
             memset(&rpc_pollingtime, 0, sizeof(rpc_pollingtime));
-            rpc_pollingtime.option =
-                QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_RPC_POLLING_TIME;
+            rpc_pollingtime.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_RPC_POLLING_TIME;
             rpc_pollingtime.rpcPollingTimeConfig = _qnn_rpc_pollingtime;
             const QnnHtpPerfInfrastructure_PowerConfig_t * power_configs[] = {&rpc_pollingtime, nullptr};
             if (_qnn_htp_perfinfra) {
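
The hunk above also illustrates how QNN-style APIs take option lists: not a pointer-plus-count pair but a null-terminated array of config pointers. A minimal self-contained sketch of that convention, with a plain struct standing in for QnnHtpPerfInfrastructure_PowerConfig_t (the option id and value below are made up):

    #include <cstdio>

    // Stand-in for QnnHtpPerfInfrastructure_PowerConfig_t: an option id plus a value.
    struct power_config {
        int option;
        int value;
    };

    // Walk a null-terminated array of config pointers, QNN-style.
    static void apply_configs(const power_config * const * configs) {
        for (; *configs != nullptr; ++configs) {
            printf("option %d = %d\n", (*configs)->option, (*configs)->value);
        }
    }

    int main() {
        power_config rpc_polling = {1, 9999};                      // illustrative option id and value
        const power_config * configs[] = {&rpc_polling, nullptr};  // trailing nullptr is the sentinel
        apply_configs(configs);
        return 0;
    }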
@@ -1694,16 +1693,15 @@ class qnn_instance {
     }
 
     void probe_device_meminfo() {
-        size_t candidate_size = 0;
-        uint8_t *rpc_buffer = nullptr;
-        const int SIZE_IN_MB = (1 << 20);
-        size_t probe_slots[] = {1024, 1536, 2048 - 48, 2048};
-        size_t probe_counts = sizeof(probe_slots) / sizeof(size_t);
+        size_t candidate_size = 0;
+        uint8_t * rpc_buffer = nullptr;
+        const int SIZE_IN_MB = (1 << 20);
+        size_t probe_slots[] = {1024, 1536, 2048 - 48, 2048};
+        size_t probe_counts = sizeof(probe_slots) / sizeof(size_t);
         for (size_t idx = 0; idx < probe_counts; idx++) {
             rpc_buffer = static_cast<uint8_t *>(alloc_rpcmem_internal(probe_slots[idx] * SIZE_IN_MB, 4));
             if (nullptr == rpc_buffer) {
-                GGMLQNN_LOG_DEBUG("alloc rpcmem %d (MB) failure, %s\n", probe_slots[idx],
-                        strerror(errno));
+                GGMLQNN_LOG_DEBUG("alloc rpcmem %d (MB) failure, %s\n", probe_slots[idx], strerror(errno));
                 break;
             } else {
                 candidate_size = probe_slots[idx];
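
probe_device_meminfo, shown above, walks an ascending list of candidate sizes and keeps the largest allocation that succeeds. A self-contained sketch of that probe-until-failure strategy, with plain malloc/free standing in for alloc_rpcmem_internal and the matching rpcmem free:

    #include <cstdio>
    #include <cstdlib>

    int main() {
        const size_t SIZE_IN_MB = 1 << 20;
        size_t probe_slots[]    = {1024, 1536, 2048 - 48, 2048};  // candidate sizes in MB, ascending
        size_t candidate_size   = 0;

        for (size_t slot : probe_slots) {
            void * buf = malloc(slot * SIZE_IN_MB);  // stand-in for alloc_rpcmem_internal(..., 4)
            if (buf == nullptr) {
                break;                               // first failure ends the probe
            }
            candidate_size = slot;                   // remember the largest size that succeeded
            free(buf);                               // stand-in for the matching rpcmem free
        }
        printf("largest successful allocation: %zu MB\n", candidate_size);
        return 0;
    }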
@@ -1822,13 +1820,13 @@ void * qnn_instance::alloc_rpcmem_internal(size_t bytes, size_t alignment) {
 
     auto allocate_bytes = static_cast<int32_t>(bytes + alignment);
     void * buf = _pfn_rpc_mem_alloc(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS, allocate_bytes);
-    if (buf == nullptr) {
+    if (nullptr == buf) {
         GGMLQNN_LOG_WARN("failed to allocate rpc memory\n");
         return nullptr;
     }
 
     auto aligned_buf = reinterpret_cast<void *>(ggmlqnn_align_to(alignment,
-            reinterpret_cast<intptr_t>(buf)));
+                                                                 reinterpret_cast<intptr_t>(buf)));
     bool status = _rpcmem_store_map.insert(std::pair<void *, void *>(aligned_buf, buf)).second;
     if (!status) {
         GGMLQNN_LOG_WARN("failed to allocate rpc memory\n");
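
alloc_rpcmem_internal over-allocates by alignment bytes, rounds the raw pointer up, and records the raw pointer in _rpcmem_store_map so the eventual free can be handed the original address. A self-contained sketch of the same bookkeeping, assuming ggmlqnn_align_to is a round-up-to-multiple helper (its body is not part of this diff):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <map>

    // Round value up to the next multiple of alignment; assumed to match what
    // ggmlqnn_align_to does in the real code.
    static intptr_t align_to(size_t alignment, intptr_t value) {
        return ((value + alignment - 1) / alignment) * alignment;
    }

    static std::map<void *, void *> store_map;  // aligned pointer -> raw pointer

    static void * alloc_aligned_tracked(size_t bytes, size_t alignment) {
        void * buf = malloc(bytes + alignment);  // over-allocate so the pointer can be shifted forward
        if (buf == nullptr) {
            return nullptr;
        }
        void * aligned_buf = reinterpret_cast<void *>(align_to(alignment, reinterpret_cast<intptr_t>(buf)));
        store_map[aligned_buf] = buf;            // remember the raw pointer for the eventual free
        return aligned_buf;
    }

    static void free_aligned_tracked(void * aligned_buf) {
        auto it = store_map.find(aligned_buf);
        if (it != store_map.end()) {
            free(it->second);                    // free must get the raw pointer, not the aligned one
            store_map.erase(it);
        }
    }

    int main() {
        void * p = alloc_aligned_tracked(100, 64);
        printf("aligned pointer: %p\n", p);
        free_aligned_tracked(p);
        return 0;
    }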
@@ -2028,8 +2026,7 @@ void qnn_instance::unregister_rpcmem() {
         Qnn_MemHandle_t mem_handle = it->second;
         error = _qnn_interface.qnn_mem_de_register(&mem_handle, 1);
         if (error != QNN_SUCCESS) {
-            GGMLQNN_LOG_WARN("failed to unregister shared memory, error %d\n",
-                    QNN_GET_ERROR_CODE(error));
+            GGMLQNN_LOG_WARN("failed to unregister shared memory, error %d\n", QNN_GET_ERROR_CODE(error));
         } else {
             GGMLQNN_LOG_DEBUG("unregister shared memory ok");
         }
@@ -2074,9 +2071,9 @@ int qnn_instance::load_backend(std::string & lib_path, const QnnSaver_Config_t *
         return 1;
     }
 
-    auto get_providers =
-        load_qnn_functionpointers<_pfn_QnnInterface_getProviders *>(lib_handle,
-            "QnnInterface_getProviders");
+    auto get_providers = load_qnn_functionpointers<_pfn_QnnInterface_getProviders *>(
+            lib_handle,
+            "QnnInterface_getProviders");
     if (nullptr == get_providers) {
         GGMLQNN_LOG_WARN("can not load symbol QnnInterface_getProviders : %s", dlerror());
         return 2;
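
load_backend resolves QnnInterface_getProviders out of a freshly opened backend library; load_qnn_functionpointers is presumably a thin wrapper over dlsym. A self-contained POSIX sketch of the same resolve-and-check pattern (the library name and function signature below are illustrative, not taken from this diff):

    #include <cstdio>
    #include <dlfcn.h>

    // Hypothetical signature standing in for _pfn_QnnInterface_getProviders;
    // the real type comes from the QNN SDK headers.
    typedef int (*get_providers_fn)(const void *** providers, unsigned int * num_providers);

    int main() {
        // "libQnnCpu.so" is an example backend library name.
        void * lib_handle = dlopen("libQnnCpu.so", RTLD_NOW | RTLD_GLOBAL);
        if (lib_handle == nullptr) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }

        auto get_providers = reinterpret_cast<get_providers_fn>(dlsym(lib_handle, "QnnInterface_getProviders"));
        if (get_providers == nullptr) {
            fprintf(stderr, "can not load symbol QnnInterface_getProviders : %s\n", dlerror());
            dlclose(lib_handle);
            return 2;
        }

        // ... call get_providers() and pick the interface whose backendId matches ...

        dlclose(lib_handle);
        return 0;
    }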
@@ -2154,7 +2151,7 @@ int qnn_instance::load_backend(std::string & lib_path, const QnnSaver_Config_t *
 
 int qnn_instance::unload_backend() {
     int dlclose_error = 0;
-    for (auto &it : _loaded_lib_handle) {
+    for (auto & it : _loaded_lib_handle) {
         dlclose_error = dlclose(it.second);
         if (dlclose_error != 0) {
             GGMLQNN_LOG_WARN("failed to close QNN backend %d, error %s\n", it.first, dlerror());
@@ -2647,7 +2644,7 @@ int qnn_instance::init_qnn_graph(const std::string & graph_name, QNNBackend devi
 }
 
 int qnn_instance::init_qnn_graph(const char * graph_name, bool debug, uint8_t do_node_validation,
-        const QnnGraph_Config_t ** graph_configs) {
+                                 const QnnGraph_Config_t ** graph_configs) {
     int result = 0;
 
     if (nullptr == graph_name) {
@@ -3173,7 +3170,7 @@ static void ggml_qnn_mul_mat(ggml_backend_t backend, ggml_tensor * op) {
     Qnn_Param_t out_0_params[] = {
         {QNN_PARAMTYPE_SCALAR,
          QNN_OP_MAT_MUL_PARAM_TRANSPOSE_IN1,
-             .scalarParam = {QNN_DATATYPE_BOOL_8, .bool8Value = 1}
+         .scalarParam = {QNN_DATATYPE_BOOL_8, .bool8Value = 1}
         }
     };
 
@@ -3223,13 +3220,13 @@ static void ggml_qnn_mul_mat(ggml_backend_t backend, ggml_tensor * op) {
     CHECK_QNN_API(error, qnn_raw_interface.graphAddNode(graph_handle,out_trans1_0));
 
     //step-6: finalize qnn graph and execute qnn graph
-    CHECK_QNN_API(error, qnn_raw_interface.graphFinalize(graph_handle, NULL, NULL));
+    CHECK_QNN_API(error, qnn_raw_interface.graphFinalize(graph_handle, nullptr, nullptr));
     Qnn_Tensor_t input_tensors_0[] = {*p_tensor0, *p_tensor1};
     Qnn_Tensor_t output_tensors_0[] = {*p_tensor2};
     CHECK_QNN_API(error, qnn_raw_interface.graphExecute(graph_handle,
                                                         input_tensors_0, 2,
                                                         output_tensors_0, 1,
-                                                        NULL, NULL));
+                                                        nullptr, nullptr));
 
     qnn_tensors_t ggml_op_mulmat_tensors;
     ggml_op_mulmat_tensors.reserve(5);
@@ -3387,7 +3384,7 @@ static ggml_backend_buffer_i ggml_backend_qnn_buffer_interface = {
     /* .get_tensor = */ ggml_backend_qnn_buffer_get_tensor,
     /* .cpy_tensor = */ ggml_backend_qnn_buffer_cpy_tensor,
     /* .clear      = */ ggml_backend_qnn_buffer_clear,
-    /* .reset      = */ NULL,
+    /* .reset      = */ nullptr,
 };
 
 static const char * ggml_backend_qnn_buffer_type_name(ggml_backend_buffer_type_t buft) {
@@ -3418,7 +3415,7 @@ static size_t ggml_backend_qnn_buffer_type_get_alignment(ggml_backend_buffer_typ
     return 32;
 }
 
-//FIXME: this value is an experimental value on Xiaomi14
+//FIXME: this value is an experimental value on Snapdragon 8 Gen3 based phone
 static size_t ggml_backend_qnn_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
     GGML_UNUSED(buft);
 
@@ -3498,8 +3495,6 @@ static const char * ggml_backend_qnn_device_get_name(ggml_backend_dev_t dev) {
         return "unknown";
     }
     return ctx->name;
-
-    GGML_UNUSED(dev);
 }
 
 static const char * ggml_backend_qnn_device_get_description(ggml_backend_dev_t dev) {
@@ -3589,10 +3584,10 @@ ggml_backend_buffer_type_t ggml_backend_qnn_buffer_type(size_t device_index) {
             /* .alloc_buffer   = */ ggml_backend_qnn_buffer_type_alloc_buffer,
             /* .get_alignment  = */ ggml_backend_qnn_buffer_type_get_alignment,
             /* .get_max_size   = */ ggml_backend_qnn_buffer_type_get_max_size,
-            /* .get_alloc_size = */ NULL,// defaults to ggml_nbytes
+            /* .get_alloc_size = */ nullptr,// defaults to ggml_nbytes
             /* .is_host        = */ ggml_backend_qnn_buffer_is_host
         },
-        /* .context = */ NULL,
+        /* .context = */ nullptr,
     };
 
     return &ggml_backend_buffer_type_qnn;
@@ -3630,14 +3625,14 @@ static struct ggml_backend_device_i ggml_backend_qnn_device_interface = {
     /* .get_props            = */ ggml_backend_qnn_device_get_props,
     /* .init_backend         = */ ggml_backend_qnn_device_init_backend,
     /* .get_buffer_type      = */ ggml_backend_qnn_device_get_buffer_type,
-    /* .get_host_buffer_type = */ NULL,
+    /* .get_host_buffer_type = */ nullptr,
     /* .buffer_from_host_ptr = */ ggml_backend_qnn_device_buffer_from_host_ptr,
     /* .supports_op          = */ ggml_backend_qnn_device_supports_op,
     /* .supports_buft        = */ ggml_backend_qnn_device_supports_buft,
-    /* .offload_op           = */ NULL,
-    /* .event_new            = */ NULL,
-    /* .event_free           = */ NULL,
-    /* .event_synchronize    = */ NULL,
+    /* .offload_op           = */ nullptr,
+    /* .event_new            = */ nullptr,
+    /* .event_free           = */ nullptr,
+    /* .event_synchronize    = */ nullptr,
 };
 
 static ggml_backend_i ggml_backend_qnn_interface = {
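
Both interface tables follow the ggml convention for vtable-style structs: positional initialization with a /* .field = */ comment in front of each slot, and nullptr for optional hooks the backend does not implement. A minimal self-contained sketch of the pattern:

    #include <cstdio>

    // A vtable-style interface struct in the ggml style; optional hooks stay nullptr.
    struct device_i {
        const char * (*get_name)(void);
        void         (*event_synchronize)(void);  // optional hook
    };

    static const char * demo_get_name(void) {
        return "demo-device";
    }

    static device_i demo_interface = {
        /* .get_name          = */ demo_get_name,
        /* .event_synchronize = */ nullptr,       // unimplemented: callers must null-check
    };

    int main() {
        printf("device: %s\n", demo_interface.get_name());
        if (demo_interface.event_synchronize != nullptr) {
            demo_interface.event_synchronize();
        }
        return 0;
    }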
@@ -3685,9 +3680,8 @@ struct ggml_backend_qnn_reg_context {
 };
 
 static const char * ggml_backend_qnn_reg_get_name(ggml_backend_reg_t reg) {
-    return "ggml-qnn";
-
     GGML_UNUSED(reg);
+    return "ggml-qnn";
 }
 
 static size_t ggml_backend_qnn_reg_get_device_count(ggml_backend_reg_t reg) {
@@ -3708,10 +3702,15 @@ static ggml_backend_dev_t ggml_backend_qnn_reg_get_device(ggml_backend_reg_t reg
 static void * ggml_backend_qnn_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
     GGML_UNUSED(reg);
 
-    if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
+    if (nullptr == name)
+        return nullptr;
+
+    const char * slot_name = "ggml_backend_set_n_threads";
+    //avoid buffer attack rather than strcmp
+    if (0 == std::memcmp(name, slot_name, strlen(slot_name))) {
         return (void *)ggml_backend_qnn_set_n_threads;
     }
-    return NULL;
+    return nullptr;
 }
 
 static const ggml_backend_reg_i ggml_backend_qnn_reg_interface = {
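
One caveat on the new comparison in the last hunk: std::memcmp(name, slot_name, strlen(slot_name)) only tests whether name starts with slot_name, and it can read past the end of a name that is shorter than the slot string, so it is arguably weaker than the strcmp it replaces once name has been null-checked. A sketch of the bounds-safe exact match (the demo function is a stand-in for ggml_backend_qnn_set_n_threads):

    #include <cstdio>
    #include <cstring>

    static void ggml_backend_demo_set_n_threads(void) {}  // stand-in for the real setter

    // Exact-match lookup: after the null check, strcmp stops at the first mismatch
    // or at both terminators, so neither string is read out of bounds.
    static void * get_proc_address(const char * name) {
        if (name == nullptr) {
            return nullptr;
        }
        if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
            return (void *)ggml_backend_demo_set_n_threads;
        }
        return nullptr;
    }

    int main() {
        // A memcmp over strlen(slot_name) bytes is a prefix test: it would accept
        // the longer name below, while strcmp correctly rejects it.
        printf("exact:  %p\n", get_proc_address("ggml_backend_set_n_threads"));
        printf("longer: %p\n", get_proc_address("ggml_backend_set_n_threads_v2"));
        return 0;
    }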