Commit feaa384

improve graph build time
1 parent 70d26ac commit feaa384

File tree

  examples/train-text-from-scratch/train-text-from-scratch.cpp
  ggml.c
  ggml.h
  llama.cpp

4 files changed: 38 additions, 22 deletions

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 3 additions & 10 deletions
@@ -1342,17 +1342,10 @@ struct ggml_tensor * forward_batch_wo_cache_flash_attn(
 // expand the graph nodes without creating leafs.
 struct ggml_tensor * expand(struct ggml_cgraph * g, struct ggml_tensor * t) {
     // check if already visited
-    for (int i = 0; i < g->n_nodes; i++) {
-        if (g->nodes[i] == t) {
-            return t;
-        }
-    }
-
-    for (int i = 0; i < g->n_leafs; i++) {
-        if (g->leafs[i] == t) {
-            return t;
-        }
+    if (t->visited) {
+        return t;
     }
+    t->visited = true;
 
     for (int i = 0; i < GGML_MAX_SRC; ++i) {
         if (t->src[i]) {
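
This hunk is where the build-time win comes from: the old expand() scanned g->nodes and g->leafs for every tensor it touched, so constructing a graph with N nodes cost O(N^2) comparisons, while the new per-tensor visited flag makes the check O(1). A minimal sketch of the same pattern outside of ggml, using hypothetical node types and names rather than the real ggml structs:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_SRC 2

    struct node {
        struct node *src[MAX_SRC]; // operands / parents
        bool         visited;      // set while a graph is being built
    };

    // Collect t and its parents exactly once, parents first (topological order).
    // Without the flag, each call would have to scan the array of nodes
    // collected so far, which is what the removed loops in the diff did.
    static void expand(struct node *nodes[], int *n_nodes, struct node *t) {
        if (t == NULL || t->visited) {
            return; // already part of the graph
        }
        t->visited = true;
        for (int i = 0; i < MAX_SRC; ++i) {
            expand(nodes, n_nodes, t->src[i]);
        }
        nodes[(*n_nodes)++] = t;
    }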

ggml.c

Lines changed: 23 additions & 11 deletions
@@ -4594,6 +4594,7 @@ struct ggml_tensor * ggml_new_tensor_impl(
         /*.is_param =*/ false,
         /*.grad =*/ NULL,
         /*.src =*/ { NULL },
+        /*.visited =*/ false,
         /*.perf_runs =*/ 0,
         /*.perf_cycles =*/ 0,
         /*.perf_time_us =*/ 0,
@@ -15752,17 +15753,11 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
     }
 
     // check if already visited
-    for (int i = 0; i < cgraph->n_nodes; i++) {
-        if (cgraph->nodes[i] == node) {
-            return;
-        }
-    }
-
-    for (int i = 0; i < cgraph->n_leafs; i++) {
-        if (cgraph->leafs[i] == node) {
-            return;
-        }
+    if (node->visited) {
+        GGML_ASSERT(cgraph->n_nodes > 0 || cgraph->n_leafs > 0); // to fix this, call ggml_graph_close() after building the graph
+        return;
     }
+    node->visited = true;
 
     for (int i = 0; i < GGML_MAX_SRC; ++i) {
         if (node->src[i]) {
@@ -15814,13 +15809,28 @@ static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_ten
 }
 
 void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) {
+    GGML_ASSERT(!cgraph->closed && "graph is closed");
     ggml_build_forward_impl(cgraph, tensor, true);
 }
 
+void ggml_graph_close(struct ggml_cgraph * cgraph) {
+    if (cgraph->closed) {
+        return;
+    }
+    for (int i = 0; i < cgraph->n_nodes; ++i) {
+        cgraph->nodes[i]->visited = false;
+    }
+    for (int i = 0; i < cgraph->n_leafs; ++i) {
+        cgraph->leafs[i]->visited = false;
+    }
+    cgraph->closed = true;
+}
+
 struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
     struct ggml_cgraph result = {
         /*.n_nodes =*/ 0,
         /*.n_leafs =*/ 0,
+        /*.closed =*/ false,
         /*.nodes =*/ { NULL },
         /*.grads =*/ { NULL },
         /*.leafs =*/ { NULL },
@@ -15865,7 +15875,7 @@ struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cg
 
         if (node->is_param) {
             GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node);
-            ggml_build_forward_impl(&result, node->grad, true);
+            ggml_build_forward_expand(&result, node->grad);
         }
     }
 
@@ -16135,6 +16145,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
 }
 
 struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
+    ggml_graph_close(cgraph);
+
     if (n_threads <= 0) {
         n_threads = GGML_DEFAULT_N_THREADS;
     }
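
Taken together, these hunks give a cgraph a simple open/closed lifecycle: ggml_build_forward_expand() asserts that the graph is still open, ggml_graph_plan() closes it (clearing every collected tensor's visited flag) before planning, and the assert added to ggml_visit_parents() catches the stale-flag case where a tensor still carries visited == true from a previous graph that was never closed. A rough sketch of the intended call order; context setup is omitted and the result tensor cur and n_threads are assumed to exist:

    struct ggml_cgraph gf = {0};

    // build phase: the graph is open, visiting tensors sets their visited flag
    ggml_build_forward_expand(&gf, cur);

    // plan/compute phase: ggml_graph_plan() calls ggml_graph_close() first,
    // which resets the visited flags and marks the graph as closed
    struct ggml_cplan plan = ggml_graph_plan(&gf, n_threads);
    (void) plan;

    // expanding the graph any further at this point would trip the new
    // GGML_ASSERT(!cgraph->closed && "graph is closed")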

ggml.h

Lines changed: 9 additions & 1 deletion
@@ -427,6 +427,8 @@ extern "C" {
         struct ggml_tensor * grad;
         struct ggml_tensor * src[GGML_MAX_SRC];
 
+        bool visited; // used to build graphs
+
         // performance
         int perf_runs;
         int64_t perf_cycles;
@@ -438,7 +440,7 @@ extern "C" {
 
         void * extra; // extra things e.g. for ggml-cuda.cu
 
-        char padding[8];
+        char padding[4];
     };
 
     static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
@@ -463,6 +465,7 @@ extern "C" {
     struct ggml_cgraph {
         int n_nodes;
         int n_leafs;
+        bool closed;
 
        struct ggml_tensor * nodes[GGML_MAX_NODES];
        struct ggml_tensor * grads[GGML_MAX_NODES];
@@ -1349,6 +1352,11 @@ extern "C" {
 
     GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
 
+    // resets the visited flag for all the tensors in the graph
+    // called by ggml_graph_plan()
+    // shouldn't be necessary to call manually, except when building multiple graphs without computing them
+    GGML_API void ggml_graph_close(struct ggml_cgraph * cgraph);
+
     GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
     GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
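
The header comment spells out the one case where ggml_graph_close() still has to be called by hand: building several graphs that share tensors without computing (and therefore without planning) the earlier ones. A rough usage sketch, assuming tensors a and b come from the same context and may share parents:

    // first graph: marks a and all of its parents as visited
    struct ggml_cgraph gf1 = ggml_build_forward(a);

    // reset the visited flags before reusing the same tensors in another graph;
    // ggml_graph_plan(&gf1, n_threads) would have done this automatically
    ggml_graph_close(&gf1);

    // the second graph can now revisit tensors that also feed gf1
    struct ggml_cgraph gf2 = ggml_build_forward(b);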

llama.cpp

Lines changed: 3 additions & 0 deletions
@@ -1701,6 +1701,8 @@ static bool llama_eval_internal(
     // logits -> probs
     //cur = ggml_soft_max_inplace(ctx0, cur);
 
+    //fprintf(stderr, "graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
+
     // run the computation
     ggml_build_forward_expand(&gf, cur);
 
@@ -1710,6 +1712,7 @@ static bool llama_eval_internal(
 
 #ifdef GGML_USE_METAL
     if (lctx.ctx_metal && N == 1) {
+        ggml_graph_close(&gf); // should only be required for the Metal backend, as ggml_graph_plan() does this automatically
         ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
         ggml_metal_graph_compute(lctx.ctx_metal, &gf);
         ggml_metal_get_tensor (lctx.ctx_metal, cur);
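
The explicit ggml_graph_close(&gf) on the Metal path is needed because ggml_metal_graph_compute() does not go through ggml_graph_plan(), which is otherwise what resets the visited flags between evals. The commented-out fprintf above it is the measurement behind the commit title: it reports how long llama_eval_internal() spends constructing the graph before any computation runs. A minimal sketch of that timing with ggml's own timer, assuming ggml_time_init() has already been called once at startup:

    const int64_t t_start_us = ggml_time_us();

    // ... build the graph, e.g. ggml_build_forward_expand(&gf, cur); ...

    fprintf(stderr, "graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);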
