Skip to content

Commit 0bb9613

Browse files
committed
cuBLAS: use host pinned memory
1 parent 86b170d commit 0bb9613

File tree

6 files changed

+52
-9
lines changed

6 files changed

+52
-9
lines changed

Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,7 @@ ifdef LLAMA_OPENBLAS
106106
endif
107107
ifdef LLAMA_CUBLAS
108108
CFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
109+
CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
109110
LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
110111
OBJS += ggml-cuda.o
111112
NVCC = nvcc
@@ -157,10 +158,10 @@ $(info )
157158
# Build library
158159
#
159160

160-
ggml.o: ggml.c ggml.h
161+
ggml.o: ggml.c ggml.h ggml-cuda.h
161162
$(CC) $(CFLAGS) -c $< -o $@
162163

163-
llama.o: llama.cpp ggml.h llama.h llama_util.h
164+
llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama_util.h
164165
$(CXX) $(CXXFLAGS) -c $< -o $@
165166

166167
common.o: examples/common.cpp examples/common.h

ggml-cuda.cu

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -364,3 +364,13 @@ void ggml_init_cublas() {
364364
// CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
365365
}
366366
}
367+
368+
// Allocate `size` bytes of page-locked (pinned) host memory.
// Pinned buffers allow faster, async-capable host<->device transfers
// than pageable heap memory. Allocation failure is handled by the
// project's CUDA_CHECK macro (defined elsewhere in this file).
void * ggml_cuda_host_malloc(size_t size) {
    void * buf = NULL;
    CUDA_CHECK(cudaMallocHost(&buf, size));
    return buf;
}
373+
374+
// Release pinned host memory previously obtained from ggml_cuda_host_malloc().
// Errors from cudaFreeHost are surfaced through the CUDA_CHECK macro.
void ggml_cuda_host_free(void * ptr) {
    CUDA_CHECK(cudaFreeHost(ptr));
}

ggml-cuda.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@ extern cudaStream_t g_cudaStream2;
3131
extern cudaEvent_t g_cudaEvent;
3232

3333
void ggml_init_cublas(void);
34+
void * ggml_cuda_host_malloc(size_t size);
35+
void ggml_cuda_host_free(void * ptr);
36+
3437
void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size);
3538
void ggml_cuda_pool_free(void * ptr, size_t size);
3639

ggml.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8412,8 +8412,6 @@ static void ggml_compute_forward_mul_mat_f16_f32(
84128412
}
84138413

84148414
#if defined(GGML_USE_CUBLAS)
8415-
ggml_fp16_t * const wdata = params->wdata;
8416-
84178415
const float alpha = 1.0f;
84188416
const float beta = 0.0f;
84198417
const int x_ne = ne01 * ne00;
@@ -8431,6 +8429,7 @@ static void ggml_compute_forward_mul_mat_f16_f32(
84318429
for (int64_t i02 = 0; i02 < ne02; i02++) {
84328430
#if defined(GGML_USE_CUBLAS)
84338431
// with cuBLAS, instead of converting src0 to fp32, we convert src1 to fp16
8432+
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + (ne11 * ne10) * (i03 * ne02 + i02);
84348433
{
84358434
size_t id = 0;
84368435
for (int64_t i01 = 0; i01 < ne11; ++i01) {
@@ -8706,7 +8705,6 @@ static void ggml_compute_forward_mul_mat_q_f32(
87068705
const float * x = wdata;
87078706
#endif
87088707

8709-
87108708
#if defined(GGML_USE_CUBLAS)
87118709
// copy data to device
87128710
CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(float) * y_ne, cudaMemcpyHostToDevice, g_cudaStream));
@@ -11568,7 +11566,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
1156811566
if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
1156911567
node->n_tasks = 1; // TODO: this actually is doing nothing
1157011568
// the threads are still spinning
11571-
cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
11569+
cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*MAX(ggml_nelements(node->src1), ggml_nelements(node->src0));
1157211570
//printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
1157311571
//printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
1157411572
//printf("cur = %zu\n", cur);
@@ -11580,6 +11578,11 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
1158011578
#endif
1158111579
} else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) {
1158211580
cur = 0;
11581+
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS)
11582+
if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
11583+
node->n_tasks = 1;
11584+
}
11585+
#endif
1158311586
} else if (ggml_is_quantized(node->src0->type) && node->src1->type == GGML_TYPE_F32) {
1158411587
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS)
1158511588
if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {

llama.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ struct llama_model {
167167
struct llama_kv_cache kv_self;
168168

169169
// the model memory buffer
170-
llama_buffer buf;
170+
llama_ctx_buffer buf;
171171

172172
// model memory mapped file
173173
std::unique_ptr<llama_mmap> mapping;
@@ -228,8 +228,8 @@ struct llama_context {
228228

229229
// memory buffers used to evaluate the model
230230
// TODO: move in llama_state
231-
llama_buffer buf_compute;
232-
llama_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
231+
llama_ctx_buffer buf_compute;
232+
llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
233233

234234
int buf_last = 0;
235235
size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };

llama_util.h

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -405,4 +405,30 @@ struct llama_buffer {
405405
delete[] addr;
406406
}
407407
};
408+
409+
#ifdef GGML_USE_CUBLAS
410+
#include "ggml-cuda.h"
411+
// Owning buffer whose storage lives in CUDA pinned (page-locked) host
// memory, giving faster host<->device copies than a plain heap allocation.
// NOTE(review): this type owns `addr` but does not delete its copy
// constructor/assignment — copying an instance would double-free on
// destruction; assumes no caller copies it. TODO confirm.
struct llama_ctx_buffer {
    uint8_t * addr = NULL;
    size_t size = 0;

    // Discard any current allocation and acquire a fresh pinned block of
    // `size` bytes. Previous contents are NOT preserved.
    void resize(size_t size) {
        if (addr != NULL) {
            ggml_cuda_host_free(addr);
        }
        addr = (uint8_t *) ggml_cuda_host_malloc(size);
        this->size = size;
    }

    // Free the pinned allocation, if any.
    ~llama_ctx_buffer() {
        if (addr != NULL) {
            ggml_cuda_host_free(addr);
        }
    }
};
429+
#else
430+
typedef llama_buffer llama_ctx_buffer;
431+
#endif
432+
433+
408434
#endif

0 commit comments

Comments
 (0)