@@ -28,6 +28,58 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
 	percpu_ref_kill(&cgrp->bpf.refcnt);
 }
 
+static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
+{
+	enum bpf_cgroup_storage_type stype;
+
+	for_each_cgroup_storage_type(stype)
+		bpf_cgroup_storage_free(storages[stype]);
+}
+
+static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
+				     struct bpf_prog *prog)
+{
+	enum bpf_cgroup_storage_type stype;
+
+	for_each_cgroup_storage_type(stype) {
+		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
+		if (IS_ERR(storages[stype])) {
+			storages[stype] = NULL;
+			bpf_cgroup_storages_free(storages);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
+				       struct bpf_cgroup_storage *src[])
+{
+	enum bpf_cgroup_storage_type stype;
+
+	for_each_cgroup_storage_type(stype)
+		dst[stype] = src[stype];
+}
+
+static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
+				     struct cgroup *cgrp,
+				     enum bpf_attach_type attach_type)
+{
+	enum bpf_cgroup_storage_type stype;
+
+	for_each_cgroup_storage_type(stype)
+		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
+}
+
+static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
+{
+	enum bpf_cgroup_storage_type stype;
+
+	for_each_cgroup_storage_type(stype)
+		bpf_cgroup_storage_unlink(storages[stype]);
+}
+
 /**
  * cgroup_bpf_release() - put references of all bpf programs and
  *                        release all cgroup bpf data
@@ -37,7 +89,6 @@ static void cgroup_bpf_release(struct work_struct *work)
 {
 	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
 					       bpf.release_work);
-	enum bpf_cgroup_storage_type stype;
 	struct bpf_prog_array *old_array;
 	unsigned int type;
@@ -50,10 +101,8 @@ static void cgroup_bpf_release(struct work_struct *work)
 	list_for_each_entry_safe(pl, tmp, progs, node) {
 		list_del(&pl->node);
 		bpf_prog_put(pl->prog);
-		for_each_cgroup_storage_type(stype) {
-			bpf_cgroup_storage_unlink(pl->storage[stype]);
-			bpf_cgroup_storage_free(pl->storage[stype]);
-		}
+		bpf_cgroup_storages_unlink(pl->storage);
+		bpf_cgroup_storages_free(pl->storage);
 		kfree(pl);
 		static_branch_dec(&cgroup_bpf_enabled_key);
 	}
@@ -138,7 +187,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
 				   enum bpf_attach_type type,
 				   struct bpf_prog_array **array)
 {
-	enum bpf_cgroup_storage_type stype;
+	struct bpf_prog_array_item *item;
 	struct bpf_prog_array *progs;
 	struct bpf_prog_list *pl;
 	struct cgroup *p = cgrp;
@@ -166,10 +215,10 @@ static int compute_effective_progs(struct cgroup *cgrp,
 			if (!pl->prog)
 				continue;
 
-			progs->items[cnt].prog = pl->prog;
-			for_each_cgroup_storage_type(stype)
-				progs->items[cnt].cgroup_storage[stype] =
-					pl->storage[stype];
+			item = &progs->items[cnt];
+			item->prog = pl->prog;
+			bpf_cgroup_storages_assign(item->cgroup_storage,
+						   pl->storage);
 			cnt++;
 		}
 	} while ((p = cgroup_parent(p)));
@@ -305,7 +354,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
 		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
 	struct bpf_prog_list *pl, *replace_pl = NULL;
-	enum bpf_cgroup_storage_type stype;
 	int err;
 
 	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
@@ -341,65 +389,46 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 		replace_pl = list_first_entry(progs, typeof(*pl), node);
 	}
 
-	for_each_cgroup_storage_type(stype) {
-		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
-		if (IS_ERR(storage[stype])) {
-			storage[stype] = NULL;
-			for_each_cgroup_storage_type(stype)
-				bpf_cgroup_storage_free(storage[stype]);
-			return -ENOMEM;
-		}
-	}
+	if (bpf_cgroup_storages_alloc(storage, prog))
+		return -ENOMEM;
 
 	if (replace_pl) {
 		pl = replace_pl;
 		old_prog = pl->prog;
-		for_each_cgroup_storage_type(stype) {
-			old_storage[stype] = pl->storage[stype];
-			bpf_cgroup_storage_unlink(old_storage[stype]);
-		}
+		bpf_cgroup_storages_unlink(pl->storage);
+		bpf_cgroup_storages_assign(old_storage, pl->storage);
 	} else {
 		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
 		if (!pl) {
-			for_each_cgroup_storage_type(stype)
-				bpf_cgroup_storage_free(storage[stype]);
+			bpf_cgroup_storages_free(storage);
 			return -ENOMEM;
 		}
 		list_add_tail(&pl->node, progs);
 	}
 
 	pl->prog = prog;
-	for_each_cgroup_storage_type(stype)
-		pl->storage[stype] = storage[stype];
-
+	bpf_cgroup_storages_assign(pl->storage, storage);
 	cgrp->bpf.flags[type] = saved_flags;
 
 	err = update_effective_progs(cgrp, type);
 	if (err)
 		goto cleanup;
 
 	static_branch_inc(&cgroup_bpf_enabled_key);
-	for_each_cgroup_storage_type(stype) {
-		if (!old_storage[stype])
-			continue;
-		bpf_cgroup_storage_free(old_storage[stype]);
-	}
+	bpf_cgroup_storages_free(old_storage);
 	if (old_prog) {
 		bpf_prog_put(old_prog);
 		static_branch_dec(&cgroup_bpf_enabled_key);
 	}
-	for_each_cgroup_storage_type(stype)
-		bpf_cgroup_storage_link(storage[stype], cgrp, type);
+	bpf_cgroup_storages_link(storage, cgrp, type);
 	return 0;
 
 cleanup:
 	/* and cleanup the prog list */
 	pl->prog = old_prog;
-	for_each_cgroup_storage_type(stype) {
-		bpf_cgroup_storage_free(pl->storage[stype]);
-		pl->storage[stype] = old_storage[stype];
-		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
-	}
+	bpf_cgroup_storages_free(pl->storage);
+	bpf_cgroup_storages_assign(pl->storage, old_storage);
+	bpf_cgroup_storages_link(pl->storage, cgrp, type);
 	if (!replace_pl) {
 		list_del(&pl->node);
 		kfree(pl);
@@ -420,7 +449,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 			  enum bpf_attach_type type)
 {
 	struct list_head *progs = &cgrp->bpf.progs[type];
-	enum bpf_cgroup_storage_type stype;
 	u32 flags = cgrp->bpf.flags[type];
 	struct bpf_prog *old_prog = NULL;
 	struct bpf_prog_list *pl;
@@ -467,10 +495,8 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 
 	/* now can actually delete it from this cgroup list */
 	list_del(&pl->node);
-	for_each_cgroup_storage_type(stype) {
-		bpf_cgroup_storage_unlink(pl->storage[stype]);
-		bpf_cgroup_storage_free(pl->storage[stype]);
-	}
+	bpf_cgroup_storages_unlink(pl->storage);
+	bpf_cgroup_storages_free(pl->storage);
 	kfree(pl);
 	if (list_empty(progs))
 		/* last program was detached, reset flags to zero */
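Taken together, the patch folds every open-coded for_each_cgroup_storage_type() loop into a small helper family that operates on the whole storage[MAX_BPF_CGROUP_STORAGE_TYPE] array at once: bpf_cgroup_storages_alloc() (which rolls back on failure), bpf_cgroup_storages_free(), bpf_cgroup_storages_assign(), bpf_cgroup_storages_link() and bpf_cgroup_storages_unlink(). The sketch below is a minimal, self-contained userspace mock of that pattern, not kernel code: every demo_* name, the two-entry type enum and the stub alloc/free bodies are invented for illustration; only the shape of the helpers mirrors the diff above.

/* demo_storages.c - standalone mock of the "plural storage helpers" pattern.
 * All demo_* identifiers are stand-ins for illustration, not kernel symbols.
 */
#include <stdlib.h>

enum demo_storage_type {
	DEMO_STORAGE_SHARED,
	DEMO_STORAGE_PERCPU,
	__DEMO_STORAGE_MAX
};

/* mirrors for_each_cgroup_storage_type(): visit every storage type once */
#define for_each_demo_storage_type(stype) \
	for (stype = 0; stype < __DEMO_STORAGE_MAX; stype++)

struct demo_storage {
	enum demo_storage_type type;
};

/* per-item stubs standing in for the singular alloc/free primitives */
static struct demo_storage *demo_storage_alloc(enum demo_storage_type type)
{
	struct demo_storage *s = malloc(sizeof(*s));

	if (s)
		s->type = type;
	return s;
}

static void demo_storage_free(struct demo_storage *s)
{
	free(s);	/* free(NULL) is a no-op, so unallocated slots are safe */
}

/* the "plural" helpers: one loop each, as in the patch */
static void demo_storages_free(struct demo_storage *storages[])
{
	enum demo_storage_type stype;

	for_each_demo_storage_type(stype)
		demo_storage_free(storages[stype]);
}

static int demo_storages_alloc(struct demo_storage *storages[])
{
	enum demo_storage_type stype;

	for_each_demo_storage_type(stype) {
		storages[stype] = demo_storage_alloc(stype);
		if (!storages[stype]) {
			/* roll back whatever was already allocated */
			demo_storages_free(storages);
			return -1;
		}
	}
	return 0;
}

static void demo_storages_assign(struct demo_storage *dst[],
				 struct demo_storage *src[])
{
	enum demo_storage_type stype;

	for_each_demo_storage_type(stype)
		dst[stype] = src[stype];
}

int main(void)
{
	/* zero-init so the rollback path never frees garbage pointers */
	struct demo_storage *storage[__DEMO_STORAGE_MAX] = { NULL };
	struct demo_storage *pl_storage[__DEMO_STORAGE_MAX] = { NULL };

	/* attach-style path: allocate every type, then hand the set over */
	if (demo_storages_alloc(storage))
		return 1;
	demo_storages_assign(pl_storage, storage);

	/* detach-style path: release every type with one call */
	demo_storages_free(pl_storage);
	return 0;
}

The design choice mirrored here is that the failure path of the plural alloc helper reuses the plural free helper, so callers only ever see a fully populated array or an error, which is what lets the attach path in the diff shrink to a single if (bpf_cgroup_storages_alloc(...)) check.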