
Commit a37de23

minor: rename ctx as plan; const
1 parent b11ac01 commit a37de23

File tree: ggml.c, ggml.h

2 files changed: 32 additions & 32 deletions


ggml.c

Lines changed: 25 additions & 25 deletions
@@ -15941,13 +15941,13 @@ void clear_numa_thread_affinity(void) {}
 #endif
 
 struct ggml_compute_state_shared {
-    struct ggml_cgraph * cgraph;
-    struct ggml_graph_compute_plan * cgraph_ctx;
+    const struct ggml_cgraph * cgraph;
+    const struct ggml_graph_compute_plan * plan;
 
     int64_t perf_node_start_cycles;
     int64_t perf_node_start_time_us;
 
-    int n_threads;
+    const int n_threads;
 
     // synchronization primitives
     atomic_int n_active; // num active threads
@@ -15971,10 +15971,10 @@ static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const
 
 static thread_ret_t ggml_graph_compute_thread(void * data) {
     struct ggml_compute_state * state = (struct ggml_compute_state *) data;
-    struct ggml_cgraph * cgraph = state->shared->cgraph;
+    const struct ggml_cgraph * cgraph = state->shared->cgraph;
 
-    struct ggml_graph_compute_plan * ctx = state->shared->cgraph_ctx;
-    const int *n_tasks_arr = ctx->n_tasks;
+    const struct ggml_graph_compute_plan * plan = state->shared->plan;
+    const int *n_tasks_arr = plan->n_tasks;
 
     const int n_threads = state->shared->n_threads;
     set_numa_thread_affinity(state->ith, n_threads);
@@ -15989,8 +15989,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
             /*.type  =*/ GGML_TASK_FINALIZE,
             /*.ith   =*/ 0,
             /*.nth   =*/ 0,
-            /*.wsize =*/ ctx->work_size,
-            /*.wdata =*/ ctx->work_data,
+            /*.wsize =*/ plan->work_size,
+            /*.wdata =*/ plan->work_data,
         };
 
         if (node_n != -1) {
@@ -16059,8 +16059,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
             /*.type  =*/ GGML_TASK_COMPUTE,
             /*.ith   =*/ state->ith,
             /*.nth   =*/ n_tasks,
-            /*.wsize =*/ ctx->work_size,
-            /*.wdata =*/ ctx->work_data,
+            /*.wsize =*/ plan->work_size,
+            /*.wdata =*/ plan->work_data,
         };
 
         if (state->ith < n_tasks) {
@@ -16077,9 +16077,9 @@ struct ggml_graph_compute_plan ggml_graph_compute_make_plan(struct ggml_cgraph *
         n_threads = GGML_DEFAULT_N_THREADS;
     }
 
-    struct ggml_graph_compute_plan ctx;
-    memset(&ctx, 0, sizeof(struct ggml_graph_compute_plan));
-    int * n_tasks = ctx.n_tasks;
+    struct ggml_graph_compute_plan plan;
+    memset(&plan, 0, sizeof(struct ggml_graph_compute_plan));
+    int * n_tasks = plan.n_tasks;
     size_t work_size = 0;
 
     // initialize tasks + work buffer
@@ -16403,35 +16403,35 @@ struct ggml_graph_compute_plan ggml_graph_compute_make_plan(struct ggml_cgraph *
         work_size += CACHE_LINE_SIZE*(n_threads - 1);
     }
 
-    ctx.n_threads = n_threads;
-    ctx.work_size = work_size;
-    ctx.work_data = NULL;
+    plan.n_threads = n_threads;
+    plan.work_size = work_size;
+    plan.work_data = NULL;
 
-    return ctx;
+    return plan;
 }
 
-void ggml_graph_compute(struct ggml_graph_compute_plan * ctx, struct ggml_cgraph * cgraph) {
+void ggml_graph_compute(struct ggml_graph_compute_plan * plan, struct ggml_cgraph * cgraph) {
     {
-        GGML_ASSERT(ctx);
-        GGML_ASSERT(ctx->n_threads > 0);
+        GGML_ASSERT(plan);
+        GGML_ASSERT(plan->n_threads > 0);
 
-        if (ctx->work_size > 0) {
-            GGML_ASSERT(ctx->work_data);
+        if (plan->work_size > 0) {
+            GGML_ASSERT(plan->work_data);
         }
 
         for (int i = 0; i < cgraph->n_nodes; ++i) {
             if (cgraph->nodes[i]->op != GGML_OP_NONE) {
-                GGML_ASSERT(ctx->n_tasks[i] > 0);
+                GGML_ASSERT(plan->n_tasks[i] > 0);
             }
         }
 
    }
 
-    const int n_threads = ctx->n_threads;
+    const int n_threads = plan->n_threads;
 
     struct ggml_compute_state_shared state_shared = {
         /*.cgraph                  =*/ cgraph,
-        /*.cgraph_ctx              =*/ ctx,
+        /*.cgraph_plan             =*/ plan,
         /*.perf_node_start_cycles  =*/ 0,
         /*.perf_node_start_time_us =*/ 0,
         /*.n_threads               =*/ n_threads,
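The definition of struct ggml_graph_compute_plan itself is not part of this diff. Judging only from the fields the renamed code touches (n_tasks, n_threads, work_size, work_data), the plan type plausibly looks like the sketch below; the field order, the uint8_t * type for work_data, and the GGML_MAX_NODES array bound are assumptions, not content of this commit.

// Hypothetical sketch of the plan type, inferred from the fields used above.
struct ggml_graph_compute_plan {
    size_t    work_size;               // size of the work buffer, filled in by ggml_graph_compute_make_plan()
    uint8_t * work_data;               // work buffer, allocated by the caller (exact type is an assumption)
    int       n_threads;               // number of threads to run the graph with
    int       n_tasks[GGML_MAX_NODES]; // per-node task counts, 1:1 with cgraph->nodes (bound is an assumption)
};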

ggml.h

Lines changed: 7 additions & 7 deletions
@@ -66,14 +66,14 @@
 //       ggml_set_f32(b, 4.0f);
 //
 //       const int n_threads = 1;
-//       struct ggml_graph_compute_plan ctx = ggml_graph_compute_make_plan(&gf, n_threads);
-//       if (ctx.work_size > 0) {
-//           ctx.work_data = malloc(ctx.work_size);
-//           GGML_ASSERT(ctx.work_data);
+//       struct ggml_graph_compute_plan plan = ggml_graph_compute_make_plan(&gf, n_threads);
+//       if (plan.work_size > 0) {
+//           plan.work_data = malloc(plan.work_size);
+//           GGML_ASSERT(plan.work_data);
 //       }
-//       ggml_graph_compute(&ctx, &gf);
-//       if (ctx.work_data) {
-//           free(ctx.work_data);
+//       ggml_graph_compute(&plan, &gf);
+//       if (plan.work_data) {
+//           free(plan.work_data);
 //       }
 //
 //       printf("f = %f\n", ggml_get_f32_1d(f, 0));
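Pulled out of the header comment above, the caller-side flow after this rename boils down to a small helper like the sketch below. It assumes ggml.h is included and that the caller has already built the graph (the gf from the header example); the helper name compute_graph is made up for illustration.

#include <stdlib.h>
#include "ggml.h"

// Run a graph with the plan-based API: make a plan, let the caller allocate
// whatever work buffer the plan asks for, compute, then free the buffer.
static void compute_graph(struct ggml_cgraph * gf, int n_threads) {
    struct ggml_graph_compute_plan plan = ggml_graph_compute_make_plan(gf, n_threads);

    if (plan.work_size > 0) {
        plan.work_data = malloc(plan.work_size);
        GGML_ASSERT(plan.work_data);
    }

    ggml_graph_compute(&plan, gf);

    if (plan.work_data) {
        free(plan.work_data);
    }
}

Keeping the work buffer outside ggml_graph_compute() appears to be the point of the plan-based API: the caller decides where the scratch memory comes from and how long it lives, and the assertion on plan->work_data enforces that it was provided.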

0 commit comments
