Commit 3ed4588

Store layers in VRAM
1 parent d052a0e commit 3ed4588

8 files changed: +74 additions, -10 deletions
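Usage sketch, not part of the commit itself: in a cuBLAS build, passing --gpu_layers N keeps the weight matrices of the first N transformer layers resident in VRAM (capped at the model's layer count). The binary name, model path, and generation flags below are illustrative only.

    ./main -m ./models/7B/ggml-model-q4_0.bin -p "Hello" -n 128 --gpu_layers 20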

examples/common.cpp

Lines changed: 8 additions & 0 deletions
@@ -271,6 +271,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.use_color = true;
         } else if (arg == "--mlock") {
             params.use_mlock = true;
+        } else if (arg == "--gpu_layers") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.gpu_layers = std::stoi(argv[i]);
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--mtest") {
@@ -406,6 +412,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_mmap_supported()) {
         fprintf(stderr, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
+    fprintf(stderr, "  --gpu_layers          number of layers to store in VRAM");
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
     fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
@@ -454,6 +461,7 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.f16_kv     = params.memory_f16;
     lparams.use_mmap   = params.use_mmap;
     lparams.use_mlock  = params.use_mlock;
+    lparams.gpu_layers = params.gpu_layers;
     lparams.logits_all = params.perplexity;
     lparams.embedding  = params.embedding;

examples/common.h

Lines changed: 1 addition & 0 deletions
@@ -68,6 +68,7 @@ struct gpt_params {
     bool perplexity     = false; // compute perplexity over the prompt
     bool use_mmap       = true;  // use mmap for faster loads
     bool use_mlock      = false; // use mlock to keep model in memory
+    int  gpu_layers     = 0;     // number of layers to store in VRAM
     bool mem_test       = false; // compute maximum memory usage
     bool verbose_prompt = false; // print prompt tokens before generation
 };

ggml-cuda.cu

Lines changed: 34 additions & 7 deletions
@@ -349,7 +349,7 @@ static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
 }
 
 // buffer pool for cuda
-#define MAX_CUDA_BUFFERS 16
+#define MAX_CUDA_BUFFERS 256
 
 struct scoped_spin_lock {
     std::atomic_flag& lock;
@@ -678,9 +678,15 @@ static void ggml_cuda_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor
             float * c_D = d_D + i * d_ne;
             char  * c_Q = d_Q + i * q_sz;
 
-            if (ne11 == 1) {
-                // copy src0 to device
+            // copy src0 to device if necessary
+            if (src0->backend == GGML_BACKEND_CPU) {
                 CUDA_CHECK(ggml_cuda_h2d_tensor_2d(c_Q, src0, i03, i02, cudaStream2));
+            } else if (src0->backend == GGML_BACKEND_CUDA) {
+                c_Q = ((char *) src0->data) + i * q_sz;
+            } else {
+                GGML_ASSERT(false);
+            }
+            if (ne11 == 1) {
                 CUDA_CHECK(cudaEventRecord(cudaEvent, cudaStream2));
 
                 // copy src1 to device
@@ -696,8 +702,7 @@
             } else {
                 float * c_X = d_X + i * x_ne;
 
-                // copy src0 and convert to fp32 on device
-                CUDA_CHECK(ggml_cuda_h2d_tensor_2d(c_Q, src0, i03, i02, cudaStream2));
+                // convert src0 to fp32 on device
                 to_fp32_cuda(c_Q, c_X, x_ne, cudaStream2);
                 CUDA_CHECK(cudaGetLastError());
                 CUDA_CHECK(cudaEventRecord(cudaEvent, cudaStream2));
@@ -742,8 +747,8 @@ bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_te
     // TODO: find the optimal values for these
     if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
         src1->type == GGML_TYPE_F32 &&
-        dst->type == GGML_TYPE_F32) {
-
+        dst->type == GGML_TYPE_F32 &&
+        ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == GGML_BACKEND_CUDA)) {
         return true;
     }
 
@@ -795,3 +800,25 @@ size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct
         return 0;
     }
 }
+
+void ggml_cuda_transform_tensor(ggml_tensor * tensor) {
+    const int64_t ne0 = tensor->ne[0];
+    const int64_t ne1 = tensor->ne[1];
+    const int64_t ne2 = tensor->ne[2];
+    const int64_t ne3 = tensor->ne[3];
+
+    const ggml_type type = tensor->type;
+    const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);
+
+    size_t q_size;
+    char * d_Q = (char *) ggml_cuda_pool_malloc(q_sz, &q_size);
+
+    cudaStream_t cudaStream2 = g_cudaStreams2[0];
+
+    // copy tensor to device
+    CUDA_CHECK(ggml_cuda_h2d_tensor_2d(d_Q, tensor, 0, 0, cudaStream2));
+    CUDA_CHECK(cudaDeviceSynchronize());
+
+    tensor->data = d_Q;
+    tensor->backend = GGML_BACKEND_CUDA;
+}
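The backend dispatch added to ggml_cuda_mul_mat_q_f32 means that once a quantized weight tensor lives in VRAM, only src1 still needs a host-to-device copy per multiplication. A minimal sketch of the intended one-time offload call, assuming a cuBLAS build; offload_weight and the tensor w are illustrative names, not part of this commit:

#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#endif

// Move a loaded weight tensor into VRAM once. Afterwards w->data points at a
// CUDA pool buffer and w->backend == GGML_BACKEND_CUDA, so the per-matmul
// ggml_cuda_h2d_tensor_2d() copy of src0 is skipped.
static void offload_weight(struct ggml_tensor * w) {
#ifdef GGML_USE_CUBLAS
    ggml_cuda_transform_tensor(w);
#else
    (void) w; // no-op in CPU-only builds
#endif
}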

ggml-cuda.h

Lines changed: 2 additions & 0 deletions
@@ -14,6 +14,8 @@ void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tens
 void * ggml_cuda_host_malloc(size_t size);
 void   ggml_cuda_host_free(void * ptr);
 
+void ggml_cuda_transform_tensor(struct ggml_tensor * tensor);
+
 #ifdef  __cplusplus
 }
 #endif

ggml.c

Lines changed: 1 addition & 0 deletions
@@ -4711,6 +4711,7 @@ struct ggml_tensor * ggml_new_tensor_impl(
 
     *result = (struct ggml_tensor) {
         /*.type         =*/ type,
+        /*.backend      =*/ GGML_BACKEND_CPU,
         /*.n_dims       =*/ n_dims,
         /*.ne           =*/ { 1, 1, 1, 1 },
         /*.nb           =*/ { 0, 0, 0, 0 },

ggml.h

Lines changed: 7 additions & 1 deletion
@@ -243,6 +243,11 @@ extern "C" {
         GGML_TYPE_COUNT,
     };
 
+    enum ggml_backend {
+        GGML_BACKEND_CPU = 0,
+        GGML_BACKEND_CUDA = 1,
+    };
+
     // model file types
     enum ggml_ftype {
         GGML_FTYPE_UNKNOWN = -1,
@@ -323,6 +328,7 @@ extern "C" {
     // n-dimensional tensor
     struct ggml_tensor {
         enum ggml_type type;
+        enum ggml_backend backend;
 
         int n_dims;
         int64_t ne[GGML_MAX_DIMS]; // number of elements
@@ -353,7 +359,7 @@ extern "C" {
 
         char name[32];
 
-        char padding[8]; // TODO: remove and add padding to name?
+        char padding[9]; // TODO: remove and add padding to name?
     };
 
     // computation graph
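Together with the ggml.c hunk above, every tensor created through ggml_new_tensor_impl starts out as GGML_BACKEND_CPU, so VRAM residency is strictly opt-in. A small illustrative helper (the function name is hypothetical) for branching on the new field:

// Returns true once a tensor has been moved to VRAM by ggml_cuda_transform_tensor();
// newly created tensors always start as GGML_BACKEND_CPU.
static bool tensor_is_in_vram(const struct ggml_tensor * t) {
    return t->backend == GGML_BACKEND_CUDA;
}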

llama.cpp

Lines changed: 20 additions & 2 deletions
@@ -9,6 +9,9 @@
 #include "llama.h"
 
 #include "ggml.h"
+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
 
 #include <array>
 #include <ctime>
@@ -815,6 +818,7 @@ struct llama_context_params llama_context_default_params() {
         /*.vocab_only                  =*/ false,
         /*.use_mmap                    =*/ true,
         /*.use_mlock                   =*/ false,
+        /*.gpu_layers                  =*/ 0,
         /*.embedding                   =*/ false,
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
@@ -877,6 +881,7 @@ static void llama_model_load_internal(
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
+        int gpu_layers,
         bool vocab_only,
         llama_progress_callback progress_callback,
         void * progress_callback_user_data) {
@@ -1011,6 +1016,18 @@
     ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);
 
     model.mapping = std::move(ml->mapping);
+#ifdef GGML_USE_CUBLAS
+    for (int i = 0; i < std::min(gpu_layers, int(hparams.n_layer)); ++i) {
+        auto & layer = model.layers[i];
+        ggml_cuda_transform_tensor(layer.wq);
+        ggml_cuda_transform_tensor(layer.wk);
+        ggml_cuda_transform_tensor(layer.wv);
+        ggml_cuda_transform_tensor(layer.wo);
+        ggml_cuda_transform_tensor(layer.w1);
+        ggml_cuda_transform_tensor(layer.w2);
+        ggml_cuda_transform_tensor(layer.w3);
+    }
+#endif
 
     // loading time will be recalculate after the first eval, so
     // we take page faults deferred by mmap() into consideration
@@ -1024,11 +1041,12 @@ static bool llama_model_load(
         ggml_type memory_type,
         bool use_mmap,
         bool use_mlock,
+        int gpu_layers,
        bool vocab_only,
         llama_progress_callback progress_callback,
         void *progress_callback_user_data) {
     try {
-        llama_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock,
+        llama_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock, gpu_layers,
                                   vocab_only, progress_callback, progress_callback_user_data);
         return true;
     } catch (const std::string & err) {
@@ -2088,7 +2106,7 @@ struct llama_context * llama_init_from_file(
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
     if (!llama_model_load(path_model, *ctx, params.n_ctx, memory_type,
-                          params.use_mmap, params.use_mlock, params.vocab_only,
+                          params.use_mmap, params.use_mlock, params.gpu_layers, params.vocab_only,
                           params.progress_callback, params.progress_callback_user_data)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
         llama_free(ctx);

llama.h

Lines changed: 1 addition & 0 deletions
@@ -63,6 +63,7 @@ extern "C" {
         bool vocab_only; // only load the vocabulary, no weights
         bool use_mmap;   // use mmap if possible
         bool use_mlock;  // force system to keep model in RAM
+        int  gpu_layers; // number of layers to store in VRAM
         bool embedding;  // embedding mode only
 
         // called with a progress value between 0 and 1, pass NULL to disable
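For programmatic use, a minimal sketch of the new llama_context_params field; the model path is a placeholder and error handling is trimmed:

#include "llama.h"

int main(void) {
    struct llama_context_params params = llama_context_default_params();
    params.gpu_layers = 20; // store the first 20 layers' weights in VRAM (takes effect only in cuBLAS builds)

    struct llama_context * ctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", params);
    if (ctx == NULL) {
        return 1;
    }

    // ... tokenize, llama_eval(), and sample as usual ...

    llama_free(ctx);
    return 0;
}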
