Skip to content

Commit 7f412da

Browse files
enable CPU HBM (#2603)
* add cpu hbm support
* add memalign 0 byte check
* Update ggml.c
* Update llama.cpp
* ggml : allow ggml_init with 0 size
* retrigger ci
* fix code style

Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 6336d83 commit 7f412da

File tree

3 files changed

+38
-2
lines changed

3 files changed

+38
-2
lines changed

CMakeLists.txt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -557,6 +557,11 @@ endif()
 
 # ggml
 
+if (GGML_USE_CPU_HBM)
+    add_definitions(-DGGML_USE_CPU_HBM)
+    find_library(memkind memkind REQUIRED)
+endif()
+
 add_library(ggml OBJECT
             ggml.c
             ggml.h
@@ -572,6 +577,9 @@ add_library(ggml OBJECT
 target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
 target_compile_features(ggml PUBLIC c_std_11) # don't bump
 target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+if (GGML_USE_CPU_HBM)
+    target_link_libraries(ggml PUBLIC memkind)
+endif()
 
 add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
 if (BUILD_SHARED_LIBS)

ggml.c

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,6 +103,9 @@ typedef void * thread_ret_t;
 #include <sys/stat.h>
 #include <unistd.h>
 
+#endif
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
 #endif
 
 // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
@@ -192,8 +195,14 @@ typedef void * thread_ret_t;
 #define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
 #else
 inline static void * ggml_aligned_malloc(size_t size) {
+    if (size == 0) {
+        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
+        return NULL;
+    }
     void * aligned_memory = NULL;
-#ifdef GGML_USE_METAL
+#ifdef GGML_USE_CPU_HBM
+    int result = hbw_posix_memalign(&aligned_memory, 16, size);
+#elif GGML_USE_METAL
     int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
 #else
     int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
@@ -215,8 +224,12 @@ inline static void * ggml_aligned_malloc(size_t size) {
     return aligned_memory;
 }
 #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
+#ifdef GGML_USE_CPU_HBM
+#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
+#else
 #define GGML_ALIGNED_FREE(ptr)    free(ptr)
 #endif
+#endif
 
 #define UNUSED GGML_UNUSED
 #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
@@ -4566,6 +4579,11 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
         return NULL;
     }
 
+    // allow to call ggml_init with 0 size
+    if (params.mem_size == 0) {
+        params.mem_size = GGML_MEM_ALIGN;
+    }
+
     const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
 
     *ctx = (struct ggml_context) {

llama.cpp

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,9 @@ void replace_all(std::string & s, const std::string & search, const std::string & replace) {
     }
     s = std::move(result);
 }
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
@@ -450,6 +453,9 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
 #elif GGML_USE_METAL
 #  define llama_host_malloc(n)  ggml_metal_host_malloc(n)
 #  define llama_host_free(data) ggml_metal_host_free(data)
+#elif GGML_USE_CPU_HBM
+#  define llama_host_malloc(n)  hbw_malloc(n)
+#  define llama_host_free(data) if (data != NULL) hbw_free(data)
 #else
 #  define llama_host_malloc(n)  malloc(n)
 #  define llama_host_free(data) free(data)
@@ -1489,7 +1495,11 @@ struct llama_model_loader {
             // allocate temp buffer if not using mmap
             if (!use_mmap && cur->data == NULL) {
                 GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
-                cur->data = malloc(ggml_nbytes(cur));
+#ifdef GGML_USE_CPU_HBM
+                cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
+#else
+                cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
+#endif
             }
 
             load_data_for(cur);

0 commit comments

Comments
 (0)