@@ -23,6 +23,7 @@
 #include <linux/page-flags.h>
 
 struct mem_cgroup;
+struct obj_cgroup;
 struct page;
 struct mm_struct;
 struct kmem_cache;
@@ -192,6 +193,22 @@ struct memcg_cgwb_frn {
 	struct wb_completion done;	/* tracks in-flight foreign writebacks */
 };
 
+/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+	struct percpu_ref refcnt;
+	struct mem_cgroup *memcg;
+	atomic_t nr_charged_bytes;
+	union {
+		struct list_head list;
+		struct rcu_head rcu;
+	};
+};
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -301,6 +318,8 @@ struct mem_cgroup {
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
 	struct list_head kmem_caches;
+	struct obj_cgroup __rcu *objcg;
+	struct list_head objcg_list; /* list of inherited objcgs */
 #endif
 
 #ifdef CONFIG_CGROUP_WRITEBACK
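
objcg points at the cgroup's own bucket, while objcg_list collects buckets inherited from deceased descendants; together they enable the one-piece reparenting promised by the struct comment above. The following is an illustrative sketch, not part of this hunk: each bucket's memcg pointer is swung to the parent under css_set_lock and the inherited list is spliced over (css reference management omitted for brevity):

/* Simplified sketch of one-piece reparenting on cgroup destruction. */
static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	/* Detach the active bucket; new lookups see NULL. */
	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* Hand the active bucket to the parent. */
	WRITE_ONCE(objcg->memcg, parent);
	list_add(&objcg->list, &parent->objcg_list);

	/* Re-point already-inherited buckets and splice them over. */
	list_for_each_entry(iter, &memcg->objcg_list, list)
		WRITE_ONCE(iter->memcg, parent);
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	/* Drop the initial reference; the bucket frees when users are gone. */
	percpu_ref_kill(&objcg->refcnt);
}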
@@ -416,6 +435,33 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
 }
 
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+	return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+	percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+	percpu_ref_put(&objcg->refcnt);
+}
+
+/*
+ * After initialization, objcg->memcg always points at a valid memcg,
+ * but it can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. by holding rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+	return READ_ONCE(objcg->memcg);
+}
+
 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 {
 	if (memcg)
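
Since objcg->memcg may be swapped to the parent at any moment, a caller that needs the memcg beyond a single RCU section must pin it with a css reference. A minimal usage sketch combining the helpers above; pin_memcg_of() is a hypothetical name, not part of the patch:

/* Hypothetical helper: resolve and pin the memcg behind an objcg. */
static struct mem_cgroup *pin_memcg_of(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		/* May observe the parent if reparenting raced with us. */
		memcg = obj_cgroup_memcg(objcg);
	} while (memcg && !css_tryget(&memcg->css));
	rcu_read_unlock();

	return memcg;	/* caller releases with css_put(&memcg->css) */
}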
@@ -1368,6 +1414,11 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge_page(struct page *page, int order);
 
+struct obj_cgroup *get_obj_cgroup_from_current(void);
+
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
+
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
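
These declarations form the byte-granular counterpart to the page-order __memcg_kmem_charge_page() pair above: a caller obtains a referenced bucket for the current task and charges or uncharges an arbitrary number of bytes against it. A hedged sketch of an allocator-side caller; my_obj_alloc() and the stash-the-objcg convention are invented for illustration:

/* Hypothetical allocation path charging bytes to the current objcg. */
static void *my_obj_alloc(size_t size, gfp_t gfp)
{
	struct obj_cgroup *objcg;
	void *p;

	objcg = get_obj_cgroup_from_current();	/* takes a reference; may be NULL */
	if (objcg && obj_cgroup_charge(objcg, gfp, size)) {
		obj_cgroup_put(objcg);
		return NULL;
	}

	p = kmalloc(size, gfp);
	if (!p && objcg) {
		obj_cgroup_uncharge(objcg, size);
		obj_cgroup_put(objcg);
		return NULL;
	}

	/*
	 * On success, stash objcg alongside the object (not shown) so the
	 * free path can obj_cgroup_uncharge(objcg, size) and obj_cgroup_put().
	 */
	return p;
}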