
Commit 284e665

Clean up

Lorenzo Toniazzi committed
1 parent 1734f3f · commit 284e665

File tree

1 file changed: +3 −3 lines changed


llama.cpp

Lines changed: 3 additions & 3 deletions
@@ -3986,7 +3986,7 @@ struct llama_model_loader {
                     return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
                 }));
             }
-            // TODO LORA allocation of base tensors
+
             GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
             if (buf_mmap && cur->data == nullptr) {
                 ggml_backend_tensor_alloc(buf_mmap, cur, data);
@@ -5427,7 +5427,7 @@ static bool llm_load_tensors(
     auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); };
 
     model.layers.resize(n_layer);
-    // main players model, ml, ctx_input/output, tn (gets name?)
+
     const auto tn = LLM_TN(model.arch);
     switch (model.arch) {
         case LLM_ARCH_LLAMA:
@@ -6701,7 +6701,7 @@ static bool llm_load_tensors(
 #endif
             }
         }
-#ifdef GGML_USE_METAL // LORA Use metal on base tensors
+#ifdef GGML_USE_METAL
         else if (ml.use_mmap && use_mmap_buffer && buft == ggml_backend_metal_buffer_type()) {
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
                const size_t max_size = ggml_get_max_tensor_size(ctx);
