Commit 77c1d3a

Merge branch 'ggml-org:master' into master
2 parents: 9f7c0d2 + 9a390c4

File tree: 23 files changed (+432 additions, -329 deletions)


common/arg.cpp

Lines changed: 7 additions & 0 deletions
@@ -2451,6 +2451,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ));
+    add_opt(common_arg(
+        {"--no-op-offload"},
+        string_format("disable offloading host tensor operations to device (default: %s)", params.no_op_offload ? "true" : "false"),
+        [](common_params & params) {
+            params.no_op_offload = true;
+        }
+    ));
     add_opt(common_arg(
         {"--lora"}, "FNAME",
         "path to LoRA adapter (can be repeated to use multiple adapters)",

common/common.cpp

Lines changed: 1 addition & 0 deletions
@@ -1113,6 +1113,7 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.offload_kqv = !params.no_kv_offload;
     cparams.flash_attn  = params.flash_attn;
     cparams.no_perf     = params.no_perf;
+    cparams.op_offload  = !params.no_op_offload;

     if (params.reranking) {
         cparams.embeddings = true;
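Taken together with the common/arg.cpp hunk above, the flag plumbing is a single inversion: the CLI sets no_op_offload and the conversion helper flips it into the positive op_offload context parameter. A minimal sketch of that flow, using only names visible in these diffs (the surrounding setup and includes are assumed):

    common_params params;
    params.no_op_offload = true;  // what parsing --no-op-offload does

    // common_context_params_to_llama() copies the flags into llama_context_params;
    // after this call cparams.op_offload == false, and llama_context forwards it to
    // ggml_backend_sched_new() (see the src/llama-context.cpp hunks below).
    llama_context_params cparams = common_context_params_to_llama(params);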

common/common.h

Lines changed: 1 addition & 0 deletions
@@ -336,6 +336,7 @@ struct common_params {
     bool no_kv_offload  = false; // disable KV offloading
     bool warmup         = true;  // warmup run
     bool check_tensors  = false; // validate tensor data
+    bool no_op_offload  = false; // globally disable offload host tensor operations to device

     bool single_turn = false; // single turn chat conversation

ggml/include/ggml-backend.h

Lines changed: 2 additions & 2 deletions
@@ -248,7 +248,7 @@ extern "C" {
     // preferrably to run on the same backend as the buffer
     ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

-    sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
+    sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false, true);

     // initialize buffers from a max size graph (optional)
     reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);

     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);

     // Initialize backend buffers from a measure graph
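For callers outside llama.cpp this is an API break: ggml_backend_sched_new gains a trailing op_offload flag, and passing true preserves the previous behavior. A hedged sketch of an updated call site, assuming the backend handles (backend_gpu, backend_cpu) were created elsewhere:

    // the CPU backend must come last; it is the fallback with lowest priority
    ggml_backend_t backends[] = { backend_gpu, backend_cpu };  // assumed to exist
    ggml_backend_sched_t sched = ggml_backend_sched_new(
        backends, /*bufts=*/NULL, /*n_backends=*/2,
        GGML_DEFAULT_GRAPH_SIZE, /*parallel=*/false, /*op_offload=*/true);
    // ... allocate and compute graphs as before ...
    ggml_backend_sched_free(sched);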

ggml/src/ggml-backend.cpp

Lines changed: 6 additions & 2 deletions
@@ -674,6 +674,8 @@ struct ggml_backend_sched {
     char * context_buffer;
     size_t context_buffer_size;

+    bool op_offload;
+
     int debug;
 };

@@ -766,7 +768,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
-            if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
+            if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                 for (int b = 0; b < src_backend_id; b++) {
                     if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                         SET_CAUSE(tensor, "1.off");
@@ -1452,7 +1454,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
         ggml_backend_buffer_type_t * bufts,
         int n_backends,
         size_t graph_size,
-        bool parallel) {
+        bool parallel,
+        bool op_offload) {
     GGML_ASSERT(n_backends > 0);
     GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
     GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);
@@ -1497,6 +1500,7 @@ ggml_backend_sched_t ggml_backend_sched_new(
     }

     sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
+    sched->op_offload = op_offload;

     ggml_backend_sched_reset(sched);

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 8 additions & 2 deletions
@@ -1909,13 +1909,19 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co
 static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft);

+    // If src0 is a temporary compute buffer it may have some padding that needs to be cleared for mul_mat_vec_q or mul_mat_q.
+    // But if src0 is also a view of another tensor then this cannot be done safely because it may overwrite valid tensor data.
+    // Therefore, in such cases use cuBLAS.
+    const bool bad_padding_clear = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE
+        && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src;
+
     bool use_mul_mat_vec   = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % 2 == 0 && src1->ne[1] == 1;
-    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
+    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
-    bool use_mul_mat_q     = ggml_is_quantized(src0->type)
+    bool use_mul_mat_q     = ggml_is_quantized(src0->type) && !bad_padding_clear
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;

     bool any_gpus_with_slow_fp16 = false;
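The new bad_padding_clear guard exists because mul_mat_vec_q and mul_mat_q zero the padding of temporary compute buffers in place (see the mmq.cu and mmvq.cu hunks below), which could clobber valid data if src0 is a view of another tensor. A hypothetical helper restating the same predicate, purely for illustration (the function name is not part of this commit):

    // true when src0 lives in a temporary compute buffer, has trailing padding,
    // and is a view of another tensor, so the padding cannot be memset safely
    static bool mul_mat_padding_clear_is_unsafe(const ggml_tensor * src0) {
        return ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE
            && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0)
            && src0->view_src != nullptr;
    }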

ggml/src/ggml-cuda/mmq.cu

Lines changed: 2 additions & 2 deletions
@@ -91,11 +91,11 @@ void ggml_cuda_mul_mat_q(

     // If src0 is a temporary compute buffer, clear any potential padding.
     if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
-        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
-        GGML_ASSERT(!src0->view_src);
         const size_t size_data  = ggml_nbytes(src0);
         const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0);
         if (size_alloc > size_data) {
+            GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+            GGML_ASSERT(!src0->view_src);
             CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream));
         }
     }

ggml/src/ggml-cuda/mmvq.cu

Lines changed: 2 additions & 2 deletions
@@ -515,11 +515,11 @@ void ggml_cuda_mul_mat_vec_q(

     // If src0 is a temporary compute buffer, clear any potential padding.
     if (ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
-        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
-        GGML_ASSERT(!src0->view_src);
         const size_t size_data  = ggml_nbytes(src0);
         const size_t size_alloc = ggml_backend_buffer_get_alloc_size(src0->buffer, src0);
         if (size_alloc > size_data) {
+            GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+            GGML_ASSERT(!src0->view_src);
             CUDA_CHECK(cudaMemsetAsync((char *) src0->data + size_data, 0, size_alloc - size_data, stream));
         }
     }

gguf-py/gguf/constants.py

Lines changed: 6 additions & 0 deletions
@@ -483,7 +483,9 @@ class MODEL_TENSOR(IntEnum):
     V_ENC_EMBD_PATCH = auto()
     V_ENC_EMBD_POS = auto()
     V_ENC_ATTN_Q = auto()
+    V_ENC_ATTN_Q_NORM = auto()
     V_ENC_ATTN_K = auto()
+    V_ENC_ATTN_K_NORM = auto()
     V_ENC_ATTN_V = auto()
     V_ENC_INPUT_NORM = auto()
     V_ENC_OUTPUT = auto()
@@ -742,7 +744,9 @@ class MODEL_TENSOR(IntEnum):
     MODEL_TENSOR.V_ENC_EMBD_PATCH: "v.patch_embd",
     MODEL_TENSOR.V_ENC_EMBD_POS: "v.position_embd",
     MODEL_TENSOR.V_ENC_ATTN_Q: "v.blk.{bid}.attn_q",
+    MODEL_TENSOR.V_ENC_ATTN_Q_NORM: "v.blk.{bid}.attn_q_norm",
     MODEL_TENSOR.V_ENC_ATTN_K: "v.blk.{bid}.attn_k",
+    MODEL_TENSOR.V_ENC_ATTN_K_NORM: "v.blk.{bid}.attn_k_norm",
     MODEL_TENSOR.V_ENC_ATTN_V: "v.blk.{bid}.attn_v",
     MODEL_TENSOR.V_ENC_INPUT_NORM: "v.blk.{bid}.ln1",
     MODEL_TENSOR.V_ENC_OUTPUT: "v.blk.{bid}.attn_out",
@@ -782,7 +786,9 @@ class MODEL_TENSOR(IntEnum):
     MODEL_TENSOR.V_ENC_EMBD_PATCH,
     MODEL_TENSOR.V_ENC_EMBD_POS,
     MODEL_TENSOR.V_ENC_ATTN_Q,
+    MODEL_TENSOR.V_ENC_ATTN_Q_NORM,
     MODEL_TENSOR.V_ENC_ATTN_K,
+    MODEL_TENSOR.V_ENC_ATTN_K_NORM,
     MODEL_TENSOR.V_ENC_ATTN_V,
     MODEL_TENSOR.V_ENC_INPUT_NORM,
     MODEL_TENSOR.V_ENC_OUTPUT,

gguf-py/gguf/tensor_mapping.py

Lines changed: 8 additions & 0 deletions
@@ -938,6 +938,10 @@ class TensorNameMap:
         "visual.blocks.{bid}.attn.q", # qwen2vl, generated
     ),

+    MODEL_TENSOR.V_ENC_ATTN_Q_NORM: (
+        "vision_tower.vision_model.encoder.layers.{bid}.attn.q_norm", # InternVL
+    ),
+
     MODEL_TENSOR.V_ENC_ATTN_K: (
         "vision_tower.vision_model.encoder.layers.{bid}.self_attn.k_proj",
         "vpm.encoder.layers.{bid}.self_attn.k_proj",
@@ -946,6 +950,10 @@ class TensorNameMap:
         "visual.blocks.{bid}.attn.k", # qwen2vl, generated
     ),

+    MODEL_TENSOR.V_ENC_ATTN_K_NORM: (
+        "vision_tower.vision_model.encoder.layers.{bid}.attn.k_norm", # InternVL
+    ),
+
     MODEL_TENSOR.V_ENC_ATTN_V: (
         "vision_tower.vision_model.encoder.layers.{bid}.self_attn.v_proj",
         "vpm.encoder.layers.{bid}.self_attn.v_proj",

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -363,6 +363,7 @@ extern "C" {
         bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
         bool flash_attn;  // whether to use flash attention [EXPERIMENTAL]
         bool no_perf;     // whether to measure performance timings
+        bool op_offload;  // whether to offload host tensor operations to device
     };

     // model quantization parameters
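On the public C API side, op_offload defaults to true (see the llama_context_default_params hunk in src/llama-context.cpp below), so existing clients keep offloading host ops unchanged. A minimal sketch of opting out programmatically, assuming a model handle loaded elsewhere and the usual llama_init_from_model entry point:

    llama_context_params cparams = llama_context_default_params();
    cparams.op_offload = false;  // same effect as the new --no-op-offload CLI flag
    llama_context * ctx = llama_init_from_model(model, cparams);  // `model` assumed loaded elsewhere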

scripts/compare-llama-bench.py

Lines changed: 5 additions & 1 deletion
@@ -318,7 +318,7 @@ def get_rows(properties):

 show = []
 # Show CPU and/or GPU by default even if the hardware for all results is the same:
-if "n_gpu_layers" not in properties_different:
+if rows_full and "n_gpu_layers" not in properties_different:
     ngl = int(rows_full[0][KEY_PROPERTIES.index("n_gpu_layers")])

     if ngl != 99 and "cpu_info" not in properties_different:
@@ -338,6 +338,10 @@ def get_rows(properties):
     pass
 rows_show = get_rows(show)

+if not rows_show:
+    logger.error(f"No comparable data was found between {name_baseline} and {name_compare}.\n")
+    sys.exit(1)
+
 table = []
 for row in rows_show:
     n_prompt = int(row[-5])

src/llama-context.cpp

Lines changed: 3 additions & 1 deletion
@@ -93,6 +93,7 @@ llama_context::llama_context(
     }

     cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
+    cparams.op_offload = params.op_offload;

     const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;

@@ -243,7 +244,7 @@ llama_context::llama_context(
         }
     }

-    sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel));
+    sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel, cparams.op_offload));

     if (pipeline_parallel) {
         LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get()));
@@ -1871,6 +1872,7 @@ llama_context_params llama_context_default_params() {
         /*.offload_kqv =*/ true,
         /*.flash_attn  =*/ false,
         /*.no_perf     =*/ true,
+        /*.op_offload  =*/ true,
     };

     return result;

src/llama-cparams.h

Lines changed: 1 addition & 0 deletions
@@ -30,6 +30,7 @@ struct llama_cparams {
     bool flash_attn;
     bool no_perf;
     bool warmup;
+    bool op_offload;

     enum llama_pooling_type pooling_type;

tests/test-opt.cpp

Lines changed: 1 addition & 1 deletion
@@ -853,7 +853,7 @@ int main(void) {
     backends_modded.insert(backends_modded.end(), backends.begin(), backends.end());

     ggml_backend_sched_t backend_sched = ggml_backend_sched_new(
-        backends_modded.data(), nullptr, backends_modded.size(), GGML_DEFAULT_GRAPH_SIZE, false);
+        backends_modded.data(), nullptr, backends_modded.size(), GGML_DEFAULT_GRAPH_SIZE, false, true);

     printf("Backend %zu/%zu: %s\n", i + 1, dev_count, ggml_backend_dev_name(devs[i]));
     printf("  Device description: %s\n", ggml_backend_dev_description(devs[i]));
