Commit def5b09

Merge tag 'cgroup-for-6.16-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup fix from Tejun Heo:

 "The rstat per-subsystem split change skipped per-cpu allocation on UP
  configs; however even on UP, depending on config options, the size of
  the percpu struct may not be zero leading to crashes. Fix it by
  conditionalizing the per-cpu area allocation and usage on the size of
  the per-cpu struct"

* tag 'cgroup-for-6.16-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: adjust criteria for rstat subsystem cpu lock access
2 parents: 29e9359 + c853d18

1 file changed (+16, -9)

kernel/cgroup/rstat.c

Lines changed: 16 additions & 9 deletions
@@ -47,8 +47,20 @@ static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
 
 static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
 {
-	if (ss)
+	if (ss) {
+		/*
+		 * Depending on config, the subsystem per-cpu lock type may be an
+		 * empty struct. In enviromnents where this is the case, allocation
+		 * of this field is not performed in ss_rstat_init(). Avoid a
+		 * cpu-based offset relative to NULL by returning early. When the
+		 * lock type is zero in size, the corresponding lock functions are
+		 * no-ops so passing them NULL is acceptable.
+		 */
+		if (sizeof(*ss->rstat_ss_cpu_lock) == 0)
+			return NULL;
+
 		return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
+	}
 
 	return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
 }
@@ -510,20 +522,15 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
 	int cpu;
 
-#ifdef CONFIG_SMP
 	/*
-	 * On uniprocessor machines, arch_spinlock_t is defined as an empty
-	 * struct. Avoid allocating a size of zero by having this block
-	 * excluded in this case. It's acceptable to leave the subsystem locks
-	 * unitialized since the associated lock functions are no-ops in the
-	 * non-smp case.
+	 * Depending on config, the subsystem per-cpu lock type may be an empty
+	 * struct. Avoid allocating a size of zero in this case.
 	 */
-	if (ss) {
+	if (ss && sizeof(*ss->rstat_ss_cpu_lock)) {
 		ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
 		if (!ss->rstat_ss_cpu_lock)
 			return -ENOMEM;
 	}
-#endif
 
 	spin_lock_init(ss_rstat_lock(ss));
 	for_each_possible_cpu(cpu)
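
To make the sizeof()-based guard concrete, here is a minimal standalone userspace sketch of the same pattern, not kernel code: the names fake_lock_t, demo_ss, demo_cpu_lock(), demo_init() and the DEMO_SMP macro are hypothetical stand-ins, and the zero-sized lock type relies on the GCC empty-struct extension to mimic a per-cpu lock type whose size can be zero depending on config.

/*
 * Sketch of the fix's idea: when the lock type is a zero-sized struct,
 * skip the allocation entirely and have the accessor return NULL rather
 * than computing an element offset from a NULL base.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef DEMO_SMP
typedef struct { int locked; } fake_lock_t;	/* non-empty lock type */
#else
typedef struct { } fake_lock_t;	/* zero-sized; GCC empty-struct extension */
#endif

struct demo_ss {
	fake_lock_t *cpu_lock;	/* stands in for the per-cpu lock area */
};

/* Mirrors ss_rstat_cpu_lock(): never derive a cpu offset from a NULL base. */
static fake_lock_t *demo_cpu_lock(struct demo_ss *ss, int cpu)
{
	if (sizeof(*ss->cpu_lock) == 0)
		return NULL;	/* callers' lock helpers treat this as a no-op */
	return &ss->cpu_lock[cpu];
}

/* Mirrors ss_rstat_init(): only allocate when the type has a real size. */
static int demo_init(struct demo_ss *ss, int nr_cpus)
{
	if (sizeof(*ss->cpu_lock)) {
		ss->cpu_lock = calloc(nr_cpus, sizeof(*ss->cpu_lock));
		if (!ss->cpu_lock)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_ss ss = { 0 };

	if (demo_init(&ss, 4))
		return 1;
	/* NULL when the lock type is empty, a valid element pointer otherwise. */
	printf("cpu 2 lock: %p\n", (void *)demo_cpu_lock(&ss, 2));
	free(ss.cpu_lock);
	return 0;
}

Built with -DDEMO_SMP the accessor returns a pointer into the allocated array; without it the allocation is skipped and callers get NULL, which is the situation the patch comment describes as safe because the corresponding lock functions are no-ops when the lock type has zero size.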
