Commit 1a1ffd4

update

committed
1 parent ca8fa3b commit 1a1ffd4

1 file changed: +6 -6 lines changed

llama.cpp

Lines changed: 6 additions & 6 deletions
@@ -570,7 +570,7 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
 // llama helpers
 //
 
-static inline void * llama_host_malloc(size_t n) {
+inline void * llama_host_malloc(size_t n) {
 #ifdef GGML_USE_CUBLAS
     if (ggml_cublas_loaded()) {
         return ggml_cuda_host_malloc(n);
@@ -580,13 +580,13 @@ static inline void * llama_host_malloc(size_t n) {
 #elif GGML_USE_METAL
     return ggml_metal_host_malloc(n);
 #elif GGML_USE_CPU_HBM
-    return hbw_malloc(n)
+    return hbw_malloc(n);
 #else
-    return malloc(n)
+    return malloc(n);
 #endif
 }
 
-static inline void llama_host_free(void * ptr) {
+inline void llama_host_free(void * ptr) {
 #ifdef GGML_USE_CUBLAS
     if (ggml_cublas_loaded()) {
         return ggml_cuda_host_free(ptr);
@@ -596,9 +596,9 @@ static inline void llama_host_free(void * ptr) {
 #elif GGML_USE_METAL
     return ggml_metal_host_free(ptr);
 #elif GGML_USE_CPU_HBM
-    return hbw_free(ptr)
+    return hbw_free(ptr);
 #else
-    return free(ptr)
+    return free(ptr);
 #endif
 }
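In effect, the commit drops the static qualifier from llama_host_malloc and llama_host_free and adds the trailing semicolons that were missing from the GGML_USE_CPU_HBM and default return statements. The two helpers form a malloc/free-style pair, so the sketch below shows how a caller inside llama.cpp might use them together; the caller name example_copy and the memcpy payload are illustrative only (not part of this commit), and the snippet assumes it is compiled inside llama.cpp where the helpers above are defined.

#include <cstring>  // memcpy

// Hypothetical caller (illustration only, not from this commit): allocate a
// host buffer with the helper, copy data into it, then release it with the
// matching free.
static void example_copy(const void * src, size_t n) {
    void * buf = llama_host_malloc(n);  // plain malloc() unless a backend macro selects pinned/HBM memory
    if (buf == nullptr) {
        return;
    }
    memcpy(buf, src, n);
    // ... use the buffer ...
    llama_host_free(buf);
}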
