Skip to content

Commit e744070

Browse files
committed
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: Constify function scope static struct sched_param usage
  sched: Fix strncmp operation
  sched: Move sched_autogroup_exit() to free_signal_struct()
  sched: Fix struct autogroup memory leak
  sched: Mark autogroup_init() __init
  sched: Consolidate the name of root_task_group and init_task_group
2 parents d004e4d + c9b5f50 commit e744070

File tree

8 files changed

+34
-36
lines changed

8 files changed

+34
-36
lines changed

include/linux/sched.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -2511,7 +2511,7 @@ extern void normalize_rt_tasks(void);
25112511

25122512
#ifdef CONFIG_CGROUP_SCHED
25132513

2514-
extern struct task_group init_task_group;
2514+
extern struct task_group root_task_group;
25152515

25162516
extern struct task_group *sched_create_group(struct task_group *parent);
25172517
extern void sched_destroy_group(struct task_group *tg);

kernel/fork.c

Lines changed: 3 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -169,15 +169,14 @@ EXPORT_SYMBOL(free_task);
169169
static inline void free_signal_struct(struct signal_struct *sig)
170170
{
171171
taskstats_tgid_free(sig);
172+
sched_autogroup_exit(sig);
172173
kmem_cache_free(signal_cachep, sig);
173174
}
174175

175176
static inline void put_signal_struct(struct signal_struct *sig)
176177
{
177-
if (atomic_dec_and_test(&sig->sigcnt)) {
178-
sched_autogroup_exit(sig);
178+
if (atomic_dec_and_test(&sig->sigcnt))
179179
free_signal_struct(sig);
180-
}
181180
}
182181

183182
void __put_task_struct(struct task_struct *tsk)
@@ -1318,7 +1317,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
13181317
}
13191318
bad_fork_cleanup_signal:
13201319
if (!(clone_flags & CLONE_THREAD))
1321-
put_signal_struct(p->signal);
1320+
free_signal_struct(p->signal);
13221321
bad_fork_cleanup_sighand:
13231322
__cleanup_sighand(p->sighand);
13241323
bad_fork_cleanup_fs:

kernel/irq/manage.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -577,7 +577,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
577577
*/
578578
static int irq_thread(void *data)
579579
{
580-
static struct sched_param param = {
580+
static const struct sched_param param = {
581581
.sched_priority = MAX_USER_RT_PRIO/2,
582582
};
583583
struct irqaction *action = data;

kernel/kthread.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
148148
wait_for_completion(&create.done);
149149

150150
if (!IS_ERR(create.result)) {
151-
static struct sched_param param = { .sched_priority = 0 };
151+
static const struct sched_param param = { .sched_priority = 0 };
152152
va_list args;
153153

154154
va_start(args, namefmt);

kernel/sched.c

Lines changed: 22 additions & 23 deletions
Original file line number · Diff line number · Diff line change
@@ -278,14 +278,12 @@ struct task_group {
278278
#endif
279279
};
280280

281-
#define root_task_group init_task_group
282-
283281
/* task_group_lock serializes the addition/removal of task groups */
284282
static DEFINE_SPINLOCK(task_group_lock);
285283

286284
#ifdef CONFIG_FAIR_GROUP_SCHED
287285

288-
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
286+
# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
289287

290288
/*
291289
* A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
298296
#define MIN_SHARES 2
299297
#define MAX_SHARES (1UL << 18)
300298

301-
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
299+
static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
302300
#endif
303301

304302
/* Default task group.
305303
* Every task in system belong to this group at bootup.
306304
*/
307-
struct task_group init_task_group;
305+
struct task_group root_task_group;
308306

309307
#endif /* CONFIG_CGROUP_SCHED */
310308

@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
743741
buf[cnt] = 0;
744742
cmp = strstrip(buf);
745743

746-
if (strncmp(buf, "NO_", 3) == 0) {
744+
if (strncmp(cmp, "NO_", 3) == 0) {
747745
neg = 1;
748746
cmp += 3;
749747
}
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
78487846
cfs_rq->tg = tg;
78497847

78507848
tg->se[cpu] = se;
7851-
/* se could be NULL for init_task_group */
7849+
/* se could be NULL for root_task_group */
78527850
if (!se)
78537851
return;
78547852

@@ -7908,18 +7906,18 @@ void __init sched_init(void)
79087906
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
79097907

79107908
#ifdef CONFIG_FAIR_GROUP_SCHED
7911-
init_task_group.se = (struct sched_entity **)ptr;
7909+
root_task_group.se = (struct sched_entity **)ptr;
79127910
ptr += nr_cpu_ids * sizeof(void **);
79137911

7914-
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
7912+
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
79157913
ptr += nr_cpu_ids * sizeof(void **);
79167914

79177915
#endif /* CONFIG_FAIR_GROUP_SCHED */
79187916
#ifdef CONFIG_RT_GROUP_SCHED
7919-
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
7917+
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
79207918
ptr += nr_cpu_ids * sizeof(void **);
79217919

7922-
init_task_group.rt_rq = (struct rt_rq **)ptr;
7920+
root_task_group.rt_rq = (struct rt_rq **)ptr;
79237921
ptr += nr_cpu_ids * sizeof(void **);
79247922

79257923
#endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
79397937
global_rt_period(), global_rt_runtime());
79407938

79417939
#ifdef CONFIG_RT_GROUP_SCHED
7942-
init_rt_bandwidth(&init_task_group.rt_bandwidth,
7940+
init_rt_bandwidth(&root_task_group.rt_bandwidth,
79437941
global_rt_period(), global_rt_runtime());
79447942
#endif /* CONFIG_RT_GROUP_SCHED */
79457943

79467944
#ifdef CONFIG_CGROUP_SCHED
7947-
list_add(&init_task_group.list, &task_groups);
7948-
INIT_LIST_HEAD(&init_task_group.children);
7945+
list_add(&root_task_group.list, &task_groups);
7946+
INIT_LIST_HEAD(&root_task_group.children);
79497947
autogroup_init(&init_task);
79507948
#endif /* CONFIG_CGROUP_SCHED */
79517949

@@ -7960,34 +7958,34 @@ void __init sched_init(void)
79607958
init_cfs_rq(&rq->cfs, rq);
79617959
init_rt_rq(&rq->rt, rq);
79627960
#ifdef CONFIG_FAIR_GROUP_SCHED
7963-
init_task_group.shares = init_task_group_load;
7961+
root_task_group.shares = root_task_group_load;
79647962
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
79657963
/*
7966-
* How much cpu bandwidth does init_task_group get?
7964+
* How much cpu bandwidth does root_task_group get?
79677965
*
79687966
* In case of task-groups formed thr' the cgroup filesystem, it
79697967
* gets 100% of the cpu resources in the system. This overall
79707968
* system cpu resource is divided among the tasks of
7971-
* init_task_group and its child task-groups in a fair manner,
7969+
* root_task_group and its child task-groups in a fair manner,
79727970
* based on each entity's (task or task-group's) weight
79737971
* (se->load.weight).
79747972
*
7975-
* In other words, if init_task_group has 10 tasks of weight
7973+
* In other words, if root_task_group has 10 tasks of weight
79767974
* 1024) and two child groups A0 and A1 (of weight 1024 each),
79777975
* then A0's share of the cpu resource is:
79787976
*
79797977
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
79807978
*
7981-
* We achieve this by letting init_task_group's tasks sit
7982-
* directly in rq->cfs (i.e init_task_group->se[] = NULL).
7979+
* We achieve this by letting root_task_group's tasks sit
7980+
* directly in rq->cfs (i.e root_task_group->se[] = NULL).
79837981
*/
7984-
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
7982+
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
79857983
#endif /* CONFIG_FAIR_GROUP_SCHED */
79867984

79877985
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
79887986
#ifdef CONFIG_RT_GROUP_SCHED
79897987
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
7990-
init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
7988+
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
79917989
#endif
79927990

79937991
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
83798377
{
83808378
free_fair_sched_group(tg);
83818379
free_rt_sched_group(tg);
8380+
autogroup_free(tg);
83828381
kfree(tg);
83838382
}
83848383

@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
88128811

88138812
if (!cgrp->parent) {
88148813
/* This is early initialization for the top cgroup */
8815-
return &init_task_group.css;
8814+
return &root_task_group.css;
88168815
}
88178816

88188817
parent = cgroup_tg(cgrp->parent);

kernel/sched_autogroup.c

Lines changed: 4 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -9,10 +9,10 @@ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
99
static struct autogroup autogroup_default;
1010
static atomic_t autogroup_seq_nr;
1111

12-
static void autogroup_init(struct task_struct *init_task)
12+
static void __init autogroup_init(struct task_struct *init_task)
1313
{
14-
autogroup_default.tg = &init_task_group;
15-
init_task_group.autogroup = &autogroup_default;
14+
autogroup_default.tg = &root_task_group;
15+
root_task_group.autogroup = &autogroup_default;
1616
kref_init(&autogroup_default.kref);
1717
init_rwsem(&autogroup_default.lock);
1818
init_task->signal->autogroup = &autogroup_default;
@@ -63,7 +63,7 @@ static inline struct autogroup *autogroup_create(void)
6363
if (!ag)
6464
goto out_fail;
6565

66-
tg = sched_create_group(&init_task_group);
66+
tg = sched_create_group(&root_task_group);
6767

6868
if (IS_ERR(tg))
6969
goto out_free;

kernel/softirq.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -853,7 +853,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
853853
cpumask_any(cpu_online_mask));
854854
case CPU_DEAD:
855855
case CPU_DEAD_FROZEN: {
856-
static struct sched_param param = {
856+
static const struct sched_param param = {
857857
.sched_priority = MAX_RT_PRIO-1
858858
};
859859

kernel/trace/trace_selftest.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
558558
static int trace_wakeup_test_thread(void *data)
559559
{
560560
/* Make this a RT thread, doesn't need to be too high */
561-
static struct sched_param param = { .sched_priority = 5 };
561+
static const struct sched_param param = { .sched_priority = 5 };
562562
struct completion *x = data;
563563

564564
sched_setscheduler(current, SCHED_FIFO, &param);

0 commit comments

Comments
 (0)