Commit 85679d3

llama : improve output buffer type selection (#10098)
1 parent: 1e9f949


src/llama.cpp

Lines changed: 4 additions & 12 deletions
@@ -17162,18 +17162,10 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
 
         auto * buft = ggml_backend_cpu_buffer_type();
         // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
-        ggml_tensor * output_tensor = lctx.model.output;
-        if (!output_tensor) {
-            // bert models don't have an output tensor, use the last layer
-            output_tensor = lctx.model.layers.back().layer_out_norm;
-        }
-        if (output_tensor) {
-            auto * output_buft = ggml_backend_buffer_get_type(output_tensor->buffer);
-            auto * output_dev = ggml_backend_buft_get_device(output_buft);
-            auto * output_dev_host_buft = ggml_backend_dev_host_buffer_type(output_dev);
-            if (output_dev_host_buft) {
-                buft = output_dev_host_buft;
-            }
+        auto * output_dev = lctx.model.dev_output.dev;
+        auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
+        if (output_dev_host_buft) {
+            buft = output_dev_host_buft;
         }
         lctx.buf_output = ggml_backend_buft_alloc_buffer(buft, new_size);
         if (lctx.buf_output == nullptr) {
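
Why this change works: previously the code located the output tensor (falling back to the last layer's layer_out_norm for BERT-style models, which have no output tensor) and derived the device from that tensor's buffer. The model now records its output device directly in lctx.model.dev_output.dev, so the buffer-type selection no longer needs the tensor lookup or the BERT special case. Below is a minimal standalone sketch of the resulting pattern, using only the ggml-backend calls visible in the diff; the helper name select_output_buft is hypothetical, not part of the commit.

#include "ggml-backend.h"

// Hypothetical helper illustrating the selection logic above: prefer the
// host (pinned) buffer type of the device that holds the output, and fall
// back to a plain CPU buffer when there is no output device or it exposes
// no host buffer type. Pinned host memory speeds up copying the logits
// from the device back to system memory.
static ggml_backend_buffer_type_t select_output_buft(ggml_backend_dev_t output_dev) {
    ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

    ggml_backend_buffer_type_t host_buft =
        output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
    if (host_buft) {
        buft = host_buft;
    }
    return buft;
}

// Usage mirroring the patched code path:
//     lctx.buf_output = ggml_backend_buft_alloc_buffer(
//         select_output_buft(lctx.model.dev_output.dev), new_size);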
