
Commit 5ea66f4

slaren authored and ggerganov committed
fixes
1 parent 4ef1b01 commit 5ea66f4

5 files changed: 40 additions, 22 deletions


ggml/include/ggml-backend.h

Lines changed: 2 additions & 0 deletions
@@ -127,6 +127,8 @@ extern "C" {
         bool async;
         // pinned host buffer
         bool host_buffer;
+        // creating buffers from host ptr
+        bool buffer_from_host_ptr;
         // event synchronization
         bool events;
     };
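The new flag is read through the existing props API. A minimal sketch, assuming a device handle such as the one llama.cpp obtains below with ggml_backend_buft_get_device(buft):

    // returns true if the device can wrap an existing host allocation in a backend buffer
    static bool device_supports_host_ptr_buffers(ggml_backend_dev_t dev) {
        struct ggml_backend_dev_props props;
        ggml_backend_dev_get_props(dev, &props);
        return props.caps.buffer_from_host_ptr;
    }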

ggml/src/ggml-backend.cpp

Lines changed: 5 additions & 3 deletions
@@ -463,6 +463,7 @@ enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
 }
 
 void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
+    memset(props, 0, sizeof(*props));
     device->iface.get_props(device, props);
 }
 
@@ -1129,9 +1130,10 @@ static void ggml_backend_cpu_device_get_props(ggml_backend_dev_t dev, struct ggm
     props->type = ggml_backend_cpu_device_get_type(dev);
     ggml_backend_cpu_device_get_memory(dev, &props->memory_free, &props->memory_total);
     props->caps = {
-        /* async */ false,
-        /* host_buffer */ false,
-        /* events */ false,
+        /* .async = */ false,
+        /* .host_buffer = */ false,
+        /* .buffer_from_host_ptr = */ true,
+        /* .events = */ false,
     };
 }
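The added memset zero-initializes the whole props struct before the backend's get_props fills it in, so a backend that predates a newly added capability field still reports it as false rather than reading uninitialized memory. A rough illustration with a hypothetical out-of-tree backend (names invented for the example):

    // Hypothetical backend get_props written before buffer_from_host_ptr existed;
    // it never touches the new flag, and the memset above keeps it at false.
    static void my_backend_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
        props->caps.async       = true;
        props->caps.host_buffer = false;
        props->caps.events      = false;
        // props->caps.buffer_from_host_ptr is deliberately not set here
        GGML_UNUSED(dev);
    }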

ggml/src/ggml-cuda.cu

Lines changed: 4 additions & 3 deletions
@@ -2920,9 +2920,10 @@ static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_back
 #endif
 
     props->caps = {
-        /* async */ true,
-        /* host_buffer */ host_buffer,
-        /* events */ events,
+        /* .async = */ true,
+        /* .host_buffer = */ host_buffer,
+        /* .buffer_from_host_ptr = */ false,
+        /* .events = */ events,
     };
 }
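These initializers are positional; the /* .field = */ comments are documentation only, so every backend's list has to stay in the declaration order of ggml_backend_dev_caps, and inserting buffer_from_host_ptr before events means each list gains a fourth entry in that exact slot. A small sketch of the equivalent explicit form, using the host_buffer and events locals already present in this function:

    // equivalent explicit assignments, in the declaration order from ggml-backend.h
    props->caps.async                = true;
    props->caps.host_buffer          = host_buffer;
    props->caps.buffer_from_host_ptr = false;   // set to false for CUDA in this change
    props->caps.events               = events;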

ggml/src/ggml-metal.m

Lines changed: 11 additions & 8 deletions
@@ -3567,12 +3567,14 @@ void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
 }
 
 static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
-    // TODO
-    *free = 0;
-    *total = 0;
-
     if (@available(macOS 10.12, iOS 16.0, *)) {
-        *total = g_state.mtl_device.recommendedMaxWorkingSetSize;
+        id<MTLDevice> device = ggml_backend_metal_get_device();
+        *total = device.recommendedMaxWorkingSetSize;
+        *free = *total - device.currentAllocatedSize;
+        ggml_backend_metal_free_device();
+    } else {
+        *free = 1;
+        *total = 1;
     }
 
     GGML_UNUSED(dev);
@@ -3590,9 +3592,10 @@ static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct g
     props->type = ggml_backend_metal_device_get_type(dev);
     ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
     props->caps = (struct ggml_backend_dev_caps) {
-        /* async */ false,
-        /* host_buffer */ false,
-        /* events */ false,
+        /* .async = */ false,
+        /* .host_buffer = */ false,
+        /* .buffer_from_host_ptr = */ true,
+        /* .events = */ false,
     };
 }
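With this change the Metal device reports recommendedMaxWorkingSetSize as the total and subtracts currentAllocatedSize to estimate the free amount, falling back to 1/1 when the availability check fails. A small sketch of how these numbers surface through the generic device API, assuming dev refers to the Metal device and <stdio.h> plus ggml-backend.h are included:

    // print the memory figures the Metal backend now fills in via get_memory
    static void print_device_memory(ggml_backend_dev_t dev) {
        struct ggml_backend_dev_props props;
        ggml_backend_dev_get_props(dev, &props);
        printf("%s: %zu MiB free of %zu MiB total\n",
               props.name, props.memory_free / 1024 / 1024, props.memory_total / 1024 / 1024);
    }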

src/llama.cpp

Lines changed: 18 additions & 8 deletions
@@ -8907,20 +8907,30 @@ static bool llm_load_tensors(
         llama_buf_map bufs;
         bufs.reserve(n_max_backend_buffer);
 
-        // only the mmap region containing the tensors in the model is mapped to the backend buffer
-        // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
-        // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
-        if (ml.use_mmap && use_mmap_buffer && buft == llama_default_buffer_type_cpu(model, true)) {
+        // check if this backend device supports buffer_from_host_ptr
+        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
+        bool buffer_from_host_ptr_supported = false;
+        if (dev) {
+            ggml_backend_dev_props props;
+            ggml_backend_dev_get_props(dev, &props);
+            buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
+        }
+
+        if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported) {
             for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
+                // only the mmap region containing the tensors in the model is mapped to the backend buffer
+                // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
+                // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
                 void * addr = nullptr;
-                size_t first, last;
+                size_t first, last; // NOLINT
                 ml.get_mapping_range(&first, &last, &addr, idx, ctx);
                 if (first >= last) {
                     continue;
                 }
-                ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr((char *) addr + first, last - first);
+                const size_t max_size = ggml_get_max_tensor_size(ctx);
+                ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
                 if (buf == nullptr) {
-                    throw std::runtime_error("unable to allocate backend CPU buffer");
+                    throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
                 }
                 model.bufs.push_back(buf);
                 bufs.emplace(idx, buf);
@@ -8929,7 +8939,7 @@ static bool llm_load_tensors(
         else {
            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
            if (buf == nullptr) {
-                throw std::runtime_error("unable to allocate backend buffer");
+                throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
            }
            model.bufs.push_back(buf);
            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
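Taken together, the loader now asks the buffer type's device whether it can wrap host pointers and, only if so, maps the mmap-ed file region directly; otherwise it falls back to a regular backend allocation. A condensed sketch of that flow, reusing the loader's addr/first/last/ctx/buft from the code above:

    // decide between wrapping the mmap-ed region and allocating a fresh buffer
    ggml_backend_buffer_t buf = nullptr;
    ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
    bool wrap_host_ptr = false;
    if (dev) {
        ggml_backend_dev_props props;
        ggml_backend_dev_get_props(dev, &props);
        wrap_host_ptr = props.caps.buffer_from_host_ptr;
    }
    if (wrap_host_ptr) {
        // no copy: the backend buffer aliases the mmap-ed weights
        const size_t max_size = ggml_get_max_tensor_size(ctx);
        buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
    } else {
        // copy path: allocate backend memory and let the loader upload tensor data
        buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
    }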
