
Commit e819070

sync : migrate examples and llama.cpp to dynamic graphs (wip)
1 parent aa7a2c4 commit e819070

File tree

10 files changed (+50 / -41 lines)

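For orientation: every file below follows the same pattern, replacing the old value-returning graph API with a graph object allocated inside a ggml context. A minimal sketch of the before/after (the names ctx and result are illustrative, not taken from the diff):

    // before: graph on the stack, returned by value (declarations removed from ggml.h below)
    // struct ggml_cgraph gf = ggml_build_forward(result);

    // after: graph metadata lives in the context; nodes are added explicitly
    struct ggml_cgraph * gf = ggml_new_graph(ctx);  // default capacity: GGML_DEFAULT_GRAPH_SIZE nodes
    ggml_build_forward_expand(gf, result);          // expands the graph from `result` back to its inputs

Call sites that previously passed &gf now receive the pointer directly.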

common/train.h

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,8 @@
 #include "ggml.h"
 #include "llama.h"
 
+#define LLAMA_TRAIN_MAX_NODES 16384
+
 typedef std::string mt19937_state;
 
 struct train_state {
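The new LLAMA_TRAIN_MAX_NODES constant replaces GGML_MAX_NODES in the training examples, whose graphs are much larger than the default graph size. A hedged sketch of how such a graph pair could be allocated with ggml_new_graph_custom and ggml_build_backward_expand from ggml.h (ctx_compute and loss are illustrative names; these exact call sites are not part of this diff):

    // forward and backward graphs sized for training, with gradient slots enabled
    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, /*grads=*/true);
    struct ggml_cgraph * gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, /*grads=*/true);

    ggml_build_forward_expand (gf, loss);                            // forward pass ending in the training loss
    ggml_build_backward_expand(ctx_compute, gf, gb, /*keep=*/true);  // derive the backward graph from gf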

examples/benchmark/benchmark-matmult.cpp

Lines changed: 12 additions & 9 deletions
@@ -171,7 +171,8 @@ int main(int argc, char ** argv) {
     struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2);
 
     // printf("Creating compute graph\n");
-    struct ggml_cgraph gf = ggml_build_forward(m11xm2);
+    struct ggml_cgraph * gf = ggml_new_graph(ctx);
+    ggml_build_forward_expand(gf, m11xm2);
 
     printf("n_threads=%i\n", benchmark_params.n_threads);
 
@@ -180,9 +181,9 @@ int main(int argc, char ** argv) {
 
     std::vector<uint8_t> work_buffer;
 
-    ggml_graph_compute_helper(work_buffer, &gf, benchmark_params.n_threads);
+    ggml_graph_compute_helper(work_buffer, gf, benchmark_params.n_threads);
 
-    TENSOR_DUMP(gf.nodes[0]);
+    TENSOR_DUMP(gf->nodes[0]);
 
     printf("\n------ Test 2 - Matrix Mult via %s code\n", ggml_type_name(qtype));
 
@@ -200,7 +201,8 @@ int main(int argc, char ** argv) {
     struct ggml_tensor * q31 = ggml_mul_mat(ctx, q11, m2);
 
     // printf("Creating compute graph\n");
-    struct ggml_cgraph gf31 = ggml_build_forward(q31);
+    struct ggml_cgraph * gf31 = ggml_new_graph(ctx);
+    ggml_build_forward_expand(gf31, q31);
 
     // Set up a second graph computation to make sure we override the CPU cache lines
     // printf("Creating new tensor q12 & Running quantize\n");
@@ -211,7 +213,8 @@ int main(int argc, char ** argv) {
     struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);
 
     //printf("Creating compute graph\n");
-    struct ggml_cgraph gf32 = ggml_build_forward(q32);
+    struct ggml_cgraph * gf32 = ggml_new_graph(ctx);
+    ggml_build_forward_expand(gf32, q32);
     printf("n_threads=%i\n", benchmark_params.n_threads);
 
     const int dimx = sizex;
@@ -223,7 +226,7 @@ int main(int argc, char ** argv) {
 
 
     // Let's use the F32 result from above as a reference for the quantized multiplication
-    float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]);
+    float sum_of_F32_reference = tensor_sum_elements(gf->nodes[0]);
 
     printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n");
     printf("=====================================================================================\n");
@@ -233,7 +236,7 @@ int main(int argc, char ** argv) {
 
         long long int start = ggml_time_us();
         //printf("Running ggml_graph_compute\n");
-        ggml_graph_compute_helper(work_buffer, &gf31, benchmark_params.n_threads);
+        ggml_graph_compute_helper(work_buffer, gf31, benchmark_params.n_threads);
 
         long long int stop = ggml_time_us();
         long long int usec = stop-start;
@@ -251,7 +254,7 @@ int main(int argc, char ** argv) {
 
         // Check that the matrix multiplication result is in the right ballpark
        // We cannot use the exact value from the F32 multiplication because the quantizuation will be slightly different
-        float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]);
+        float sum_of_Q4_result = tensor_sum_elements(gf31->nodes[0]);
         float delta = std::abs(sum_of_Q4_result - sum_of_F32_reference);
         float allowed_delta = (sum_of_F32_reference) / 1000 / 1000; // Let's accept an epsilon of 10^-6
 
@@ -266,7 +269,7 @@ int main(int argc, char ** argv) {
         }
 
         // Running a different graph computation to make sure we override the CPU cache lines
-        ggml_graph_compute_helper(work_buffer, &gf32, benchmark_params.n_threads);
+        ggml_graph_compute_helper(work_buffer, gf32, benchmark_params.n_threads);
     }
     printf("\n");
     printf("Average%78.2f\n",gflops_sum/((double)benchmark_params.n_iterations));

examples/export-lora/export-lora.cpp

Lines changed: 2 additions & 2 deletions
@@ -240,7 +240,7 @@ static struct lora_data * load_lora(struct lora_info * info) {
     }
 
     struct ggml_init_params params_ggml;
-    params_ggml.mem_size = ggml_tensor_overhead() * GGML_MAX_NODES;
+    params_ggml.mem_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE;
     params_ggml.mem_buffer = NULL;
     params_ggml.no_alloc = true;
     result->ctx = ggml_init(params_ggml);
@@ -334,7 +334,7 @@ static bool apply_lora(struct ggml_tensor * tensor, struct lora_data * lora, int
     float scaling = lora->info.scale * (float)lora->lora_alpha / (float)lora->lora_r;
 
     struct ggml_init_params params;
-    params.mem_size = GGML_OBJECT_SIZE + GGML_GRAPH_SIZE + ggml_tensor_overhead()*4 + GGML_MEM_ALIGN*5;
+    params.mem_size = GGML_OBJECT_SIZE + ggml_graph_overhead() + ggml_tensor_overhead()*4 + GGML_MEM_ALIGN*5;
     params.mem_buffer = NULL;
     params.no_alloc = true;
     struct ggml_context * ctx = NULL;
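Because ggml_cgraph is no longer a fixed-size struct, contexts that hold only graph metadata are sized with ggml_graph_overhead() (plus ggml_tensor_overhead() per tensor) instead of the removed GGML_GRAPH_SIZE constant. An annotated sketch of the sizing used in the apply_lora hunk above (the breakdown comments are interpretation, not taken from the diff):

    struct ggml_init_params params;
    params.mem_size   = GGML_OBJECT_SIZE + ggml_graph_overhead()       // one graph object plus its node/leaf arrays
                      + ggml_tensor_overhead()*4 + GGML_MEM_ALIGN*5;   // four tensor headers plus alignment slack
    params.mem_buffer = NULL;
    params.no_alloc   = true;                                          // metadata only; tensor data is managed elsewhere
    struct ggml_context * ctx = ggml_init(params);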

examples/finetune/finetune.cpp

Lines changed: 2 additions & 2 deletions
@@ -1742,8 +1742,8 @@ int main(int argc, char ** argv) {
 
     // context for compute tensors without their data
     size_t estimated_compute_size_wo_data = (
-        ggml_tensor_overhead()*GGML_MAX_NODES*2
-        + (GGML_OBJECT_SIZE+GGML_GRAPH_SIZE)*(
+        ggml_tensor_overhead()*LLAMA_TRAIN_MAX_NODES*2
+        + (GGML_OBJECT_SIZE+ggml_graph_overhead())*(
             params.common.use_checkpointing ? 3 : 2
         )
     );

examples/llava/clip.cpp

Lines changed: 1 addition & 1 deletion
@@ -664,7 +664,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     // measure mem requirement and allocate
     {
         static const size_t tensor_alignment = 32;
-        new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+        new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());
         new_clip->alloc = ggml_allocr_new_measure(tensor_alignment);
         clip_image_f32_batch batch;
         batch.size = 1;

examples/metal/metal.cpp

Lines changed: 5 additions & 5 deletions
@@ -34,7 +34,7 @@ int main(int argc, char ** argv) {
     struct ggml_context * ctx_data = NULL;
     struct ggml_context * ctx_eval = NULL;
 
-    struct ggml_cgraph gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval);
+    struct ggml_cgraph * gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval);
 
     // this allocates all Metal resources and memory buffers
     auto * ctx_metal = ggml_metal_init(1);
@@ -46,21 +46,21 @@ int main(int argc, char ** argv) {
 
     // main
     {
-        struct ggml_tensor * input = ggml_graph_get_tensor(&gf, "embd");
+        struct ggml_tensor * input = ggml_graph_get_tensor(gf, "embd");
         *(int32_t *) input->data = 1; // BOS
 
         ggml_metal_set_tensor(ctx_metal, input);
 
         // warmup
-        ggml_metal_graph_compute(ctx_metal, &gf);
+        ggml_metal_graph_compute(ctx_metal, gf);
 
         const int n_iter = 16;
 
         const int64_t t0 = ggml_time_us();
 
         // the actual inference happens here
         for (int i = 0; i < n_iter; ++i) {
-            ggml_metal_graph_compute(ctx_metal, &gf);
+            ggml_metal_graph_compute(ctx_metal, gf);
         }
 
         const int64_t t1 = ggml_time_us();
@@ -70,7 +70,7 @@ int main(int argc, char ** argv) {
 
     // debug output
     {
-        struct ggml_tensor * logits = gf.nodes[gf.n_nodes - 1];
+        struct ggml_tensor * logits = gf->nodes[gf->n_nodes - 1];
         ggml_metal_get_tensor(ctx_metal, logits);
 
         float * ptr = (float *) ggml_get_data(logits);

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 2 additions & 2 deletions
@@ -1109,8 +1109,8 @@ int main(int argc, char ** argv) {
 
     // context for compute tensors without their data
     size_t estimated_compute_size_wo_data = (
-        ggml_tensor_overhead()*GGML_MAX_NODES*2
-        + (GGML_OBJECT_SIZE+GGML_GRAPH_SIZE)*(
+        ggml_tensor_overhead()*LLAMA_TRAIN_MAX_NODES*2
+        + (GGML_OBJECT_SIZE+ggml_graph_overhead())*(
             params.common.use_checkpointing ? 3 : 2
         )
    );

ggml-metal.m

Lines changed: 16 additions & 9 deletions
@@ -1,5 +1,6 @@
 #import "ggml-metal.h"
 
+#import "ggml-backend-impl.h"
 #import "ggml.h"
 
 #import <Foundation/Foundation.h>
@@ -23,7 +24,7 @@
 
 #define UNUSED(x) (void)(x)
 
-#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
+#define GGML_MAX_CONCUR (2*GGML_DEFAULT_GRAPH_SIZE)
 
 struct ggml_metal_buffer {
     const char * name;
@@ -744,6 +745,20 @@ void ggml_metal_graph_compute(
     struct ggml_tensor * src1 = gf->nodes[i]->src[1];
     struct ggml_tensor * dst = gf->nodes[i];
 
+    switch (dst->op) {
+        case GGML_OP_NONE:
+        case GGML_OP_RESHAPE:
+        case GGML_OP_VIEW:
+        case GGML_OP_TRANSPOSE:
+        case GGML_OP_PERMUTE:
+            {
+                // noop -> next node
+            } continue;
+        default:
+            {
+            } break;
+    }
+
     const int64_t ne00 = src0 ? src0->ne[0] : 0;
     const int64_t ne01 = src0 ? src0->ne[1] : 0;
     const int64_t ne02 = src0 ? src0->ne[2] : 0;
@@ -797,14 +812,6 @@ void ggml_metal_graph_compute(
     //}
 
     switch (dst->op) {
-        case GGML_OP_NONE:
-        case GGML_OP_RESHAPE:
-        case GGML_OP_VIEW:
-        case GGML_OP_TRANSPOSE:
-        case GGML_OP_PERMUTE:
-            {
-                // noop
-            } break;
        case GGML_OP_CONCAT:
            {

ggml.h

Lines changed: 0 additions & 3 deletions
@@ -1733,9 +1733,6 @@ extern "C" {
     GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
     GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);
 
-    GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
-    GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
-
     // graph allocation in a context
     GGML_API struct ggml_cgraph * ggml_new_graph        (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
     GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);

llama.cpp

Lines changed: 8 additions & 8 deletions
@@ -8559,7 +8559,7 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat
     const size_t elt_size = ggml_element_size(kv_self.k);
 
     ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
-    ggml_cgraph gf{};
+    ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
 
     ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
     std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
@@ -8577,9 +8577,9 @@
         kv_head, n_embd, n_layer,
         elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
 
-    ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
-    ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
-    ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
+    ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k3d, kout3d));
+    ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v3d, vout3d));
+    ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
 
     ggml_free(cpy_ctx);
 
@@ -8687,7 +8687,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
     const size_t elt_size = ggml_element_size(kv_self.k);
 
     ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
-    ggml_cgraph gf{};
+    ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
 
     ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
     kin3d->data = (void *) inp;
@@ -8705,9 +8705,9 @@
         kv_head, n_embd, n_layer,
         elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
 
-    ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
-    ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
-    ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
+    ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin3d, k3d));
+    ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin3d, v3d));
+    ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
 
     ggml_free(cpy_ctx);
 }
