Commit 748922d

inwardvessel authored and htejun committed
cgroup: use subsystem-specific rstat locks to avoid contention
It is possible to eliminate contention between subsystems when
updating/flushing stats by using subsystem-specific locks. Let the existing
rstat locks be dedicated to the cgroup base stats and rename them to reflect
that. Add similar locks to the cgroup_subsys struct for use with individual
subsystems.

Lock initialization is done in the new function ss_rstat_init(ss), which
replaces cgroup_rstat_boot(void). If NULL is passed to this function, the
global base stat locks will be initialized. Otherwise, the subsystem locks
will be initialized.

Change the existing lock helper functions to accept a reference to a css.
Then, within these functions, conditionally select the appropriate locks
based on the subsystem affiliation of the given css. Add helper functions
for this selection routine to avoid repeated code.

Signed-off-by: JP Kobryn <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
1 parent 5da3bfa commit 748922d
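
The heart of the change is a pair of lock-selection helpers added to
kernel/cgroup/rstat.c (full context in the rstat.c hunks below): given a css,
the rstat helpers look at css->ss and fall back to the renamed base-stat locks
when the css belongs to cgroup::self (css->ss == NULL). Condensed from the
diff, not a standalone build unit:

	/* Condensed from the kernel/cgroup/rstat.c hunk below. */
	static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
	{
		if (ss)
			return &ss->rstat_ss_lock;	/* subsystem stats */
		return &rstat_base_lock;		/* cgroup base stats */
	}

	static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
	{
		if (ss)
			return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
		return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
	}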

6 files changed: +91, -38 lines

block/blk-cgroup.c

Lines changed: 2 additions & 2 deletions
@@ -1074,8 +1074,8 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
 		/*
 		 * For covering concurrent parent blkg update from blkg_release().
 		 *
-		 * When flushing from cgroup, cgroup_rstat_lock is always held, so
-		 * this lock won't cause contention most of time.
+		 * When flushing from cgroup, the subsystem rstat lock is always held,
+		 * so this lock won't cause contention most of time.
 		 */
 		raw_spin_lock_irqsave(&blkg_stat_lock, flags);

include/linux/cgroup-defs.h

Lines changed: 8 additions & 2 deletions
@@ -222,7 +222,10 @@ struct cgroup_subsys_state {
 	/*
 	 * A singly-linked list of css structures to be rstat flushed.
 	 * This is a scratch field to be used exclusively by
-	 * css_rstat_flush() and protected by cgroup_rstat_lock.
+	 * css_rstat_flush().
+	 *
+	 * Protected by rstat_base_lock when css is cgroup::self.
+	 * Protected by css->ss->rstat_ss_lock otherwise.
 	 */
 	struct cgroup_subsys_state *rstat_flush_next;
 };
@@ -362,7 +365,7 @@ struct css_rstat_cpu {
 	 * the css makes it unnecessary for each per-cpu struct to point back
 	 * to the associated css.
 	 *
-	 * Protected by per-cpu cgroup_rstat_cpu_lock.
+	 * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
 	 */
 	struct cgroup_subsys_state *updated_children;
 	struct cgroup_subsys_state *updated_next;	/* NULL if not on the list */
@@ -792,6 +795,9 @@ struct cgroup_subsys {
 	 * specifies the mask of subsystems that this one depends on.
 	 */
 	unsigned int depends_on;
+
+	spinlock_t rstat_ss_lock;
+	raw_spinlock_t __percpu *rstat_ss_cpu_lock;
 };
 
 extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

include/trace/events/cgroup.h

Lines changed: 10 additions & 2 deletions
@@ -231,7 +231,11 @@ DECLARE_EVENT_CLASS(cgroup_rstat,
 		  __entry->cpu, __entry->contended)
 );
 
-/* Related to global: cgroup_rstat_lock */
+/*
+ * Related to locks:
+ * global rstat_base_lock for base stats
+ * cgroup_subsys::rstat_ss_lock for subsystem stats
+ */
 DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended,
 
 	TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
@@ -253,7 +257,11 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
 	TP_ARGS(cgrp, cpu, contended)
 );
 
-/* Related to per CPU: cgroup_rstat_cpu_lock */
+/*
+ * Related to per CPU locks:
+ * global rstat_base_cpu_lock for base stats
+ * cgroup_subsys::rstat_ss_cpu_lock for subsystem stats
+ */
 DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended,
 
 	TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),

kernel/cgroup/cgroup-internal.h

Lines changed: 1 addition & 1 deletion
@@ -272,7 +272,7 @@ int cgroup_task_count(const struct cgroup *cgrp);
  */
 int css_rstat_init(struct cgroup_subsys_state *css);
 void css_rstat_exit(struct cgroup_subsys_state *css);
-void cgroup_rstat_boot(void);
+int ss_rstat_init(struct cgroup_subsys *ss);
 void cgroup_base_stat_cputime_show(struct seq_file *seq);
 
 /*

kernel/cgroup/cgroup.c

Lines changed: 2 additions & 1 deletion
@@ -6088,6 +6088,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 	css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
 	BUG_ON(css->id < 0);
 
+	BUG_ON(ss_rstat_init(ss));
 	BUG_ON(css_rstat_init(css));
 }
 
@@ -6167,7 +6168,7 @@ int __init cgroup_init(void)
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
 
-	cgroup_rstat_boot();
+	BUG_ON(ss_rstat_init(NULL));
 
 	get_user_ns(init_cgroup_ns.user_ns);

kernel/cgroup/rstat.c

Lines changed: 68 additions & 30 deletions
@@ -9,8 +9,8 @@
 
 #include <trace/events/cgroup.h>
 
-static DEFINE_SPINLOCK(cgroup_rstat_lock);
-static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
+static DEFINE_SPINLOCK(rstat_base_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, rstat_base_cpu_lock);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 
@@ -26,30 +26,48 @@ static struct cgroup_rstat_base_cpu *cgroup_rstat_base_cpu(
 	return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);
 }
 
+static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
+{
+	if (ss)
+		return &ss->rstat_ss_lock;
+
+	return &rstat_base_lock;
+}
+
+static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
+{
+	if (ss)
+		return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
+
+	return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
+}
+
 /*
- * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
+ * Helper functions for rstat per CPU locks.
  *
  * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @fast_path determine the
 * tracepoints being added, allowing us to diagnose "flush" related
 * operations without handling high-frequency fast-path "update" events.
 */
 static __always_inline
-unsigned long _css_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
-		struct cgroup_subsys_state *css, const bool fast_path)
+unsigned long _css_rstat_cpu_lock(struct cgroup_subsys_state *css, int cpu,
+		const bool fast_path)
 {
 	struct cgroup *cgrp = css->cgroup;
+	raw_spinlock_t *cpu_lock;
 	unsigned long flags;
 	bool contended;
 
 	/*
-	 * The _irqsave() is needed because cgroup_rstat_lock is
-	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
-	 * this lock with the _irq() suffix only disables interrupts on
-	 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
-	 * interrupts on both configurations. The _irqsave() ensures
-	 * that interrupts are always disabled and later restored.
+	 * The _irqsave() is needed because the locks used for flushing are
+	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring this lock
+	 * with the _irq() suffix only disables interrupts on a non-PREEMPT_RT
+	 * kernel. The raw_spinlock_t below disables interrupts on both
+	 * configurations. The _irqsave() ensures that interrupts are always
+	 * disabled and later restored.
 	 */
+	cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
 	contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
 	if (contended) {
 		if (fast_path)
@@ -69,17 +87,18 @@ unsigned long _css_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
 }
 
 static __always_inline
-void _css_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
-		struct cgroup_subsys_state *css, unsigned long flags,
-		const bool fast_path)
+void _css_rstat_cpu_unlock(struct cgroup_subsys_state *css, int cpu,
+		unsigned long flags, const bool fast_path)
 {
 	struct cgroup *cgrp = css->cgroup;
+	raw_spinlock_t *cpu_lock;
 
 	if (fast_path)
 		trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
 	else
 		trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);
 
+	cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
 	raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
 
@@ -94,7 +113,6 @@ void _css_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
  */
 __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 {
-	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	unsigned long flags;
 
 	/*
@@ -115,7 +133,7 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 	if (data_race(css_rstat_cpu(css, cpu)->updated_next))
 		return;
 
-	flags = _css_rstat_cpu_lock(cpu_lock, cpu, css, true);
+	flags = _css_rstat_cpu_lock(css, cpu, true);
 
 	/* put @css and all ancestors on the corresponding updated lists */
 	while (true) {
@@ -143,7 +161,7 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 		css = parent;
 	}
 
-	_css_rstat_cpu_unlock(cpu_lock, cpu, css, flags, true);
+	_css_rstat_cpu_unlock(css, cpu, flags, true);
 }
 
 /**
@@ -171,11 +189,11 @@ static struct cgroup_subsys_state *css_rstat_push_children(
 		child->rstat_flush_next = NULL;
 
 	/*
-	 * The cgroup_rstat_lock must be held for the whole duration from
+	 * The subsystem rstat lock must be held for the whole duration from
 	 * here as the rstat_flush_next list is being constructed to when
 	 * it is consumed later in css_rstat_flush().
 	 */
-	lockdep_assert_held(&cgroup_rstat_lock);
+	lockdep_assert_held(ss_rstat_lock(head->ss));
 
 	/*
 	 * Notation: -> updated_next pointer
@@ -245,12 +263,11 @@ static struct cgroup_subsys_state *css_rstat_push_children(
 static struct cgroup_subsys_state *css_rstat_updated_list(
 		struct cgroup_subsys_state *root, int cpu)
 {
-	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	struct css_rstat_cpu *rstatc = css_rstat_cpu(root, cpu);
 	struct cgroup_subsys_state *head = NULL, *parent, *child;
 	unsigned long flags;
 
-	flags = _css_rstat_cpu_lock(cpu_lock, cpu, root, false);
+	flags = _css_rstat_cpu_lock(root, cpu, false);
 
 	/* Return NULL if this subtree is not on-list */
 	if (!rstatc->updated_next)
@@ -287,7 +304,7 @@ static struct cgroup_subsys_state *css_rstat_updated_list(
 	if (child != root)
 		head = css_rstat_push_children(head, child, cpu);
 unlock_ret:
-	_css_rstat_cpu_unlock(cpu_lock, cpu, root, flags, false);
+	_css_rstat_cpu_unlock(root, cpu, flags, false);
 	return head;
 }
 
@@ -314,7 +331,7 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
 __bpf_hook_end();
 
 /*
- * Helper functions for locking cgroup_rstat_lock.
+ * Helper functions for locking.
  *
  * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @cpu_in_loop indicate lock
@@ -324,27 +341,31 @@ __bpf_hook_end();
  */
 static inline void __css_rstat_lock(struct cgroup_subsys_state *css,
 				    int cpu_in_loop)
-	__acquires(&cgroup_rstat_lock)
+	__acquires(ss_rstat_lock(css->ss))
 {
 	struct cgroup *cgrp = css->cgroup;
+	spinlock_t *lock;
 	bool contended;
 
-	contended = !spin_trylock_irq(&cgroup_rstat_lock);
+	lock = ss_rstat_lock(css->ss);
+	contended = !spin_trylock_irq(lock);
 	if (contended) {
 		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
-		spin_lock_irq(&cgroup_rstat_lock);
+		spin_lock_irq(lock);
 	}
 	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
 }
 
 static inline void __css_rstat_unlock(struct cgroup_subsys_state *css,
 				      int cpu_in_loop)
-	__releases(&cgroup_rstat_lock)
+	__releases(ss_rstat_lock(css->ss))
 {
 	struct cgroup *cgrp = css->cgroup;
+	spinlock_t *lock;
 
+	lock = ss_rstat_lock(css->ss);
 	trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
-	spin_unlock_irq(&cgroup_rstat_lock);
+	spin_unlock_irq(lock);
 }
 
 /**
@@ -466,12 +487,29 @@ void css_rstat_exit(struct cgroup_subsys_state *css)
 	css->rstat_cpu = NULL;
 }
 
-void __init cgroup_rstat_boot(void)
+/**
+ * ss_rstat_init - subsystem-specific rstat initialization
+ * @ss: target subsystem
+ *
+ * If @ss is NULL, the static locks associated with the base stats
+ * are initialized. If @ss is non-NULL, the subsystem-specific locks
+ * are initialized.
+ */
+int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
 	int cpu;
 
+	if (ss) {
+		ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
+		if (!ss->rstat_ss_cpu_lock)
+			return -ENOMEM;
+	}
+
+	spin_lock_init(ss_rstat_lock(ss));
 	for_each_possible_cpu(cpu)
-		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
+		raw_spin_lock_init(ss_rstat_cpu_lock(ss, cpu));
+
+	return 0;
 }
 
 /*