Commit 031299b

vireshk authored and rafaeljw committed
cpufreq: governors: Avoid unnecessary per cpu timer interrupts
The following patch introduced per-cpu timers (works) for the ondemand and conservative governors:

    commit 2abfa87
    Author: Rickard Andersson <[email protected]>
    Date:   Thu Dec 27 14:55:38 2012 +0000

        cpufreq: handle SW coordinated CPUs

This causes additional, unnecessary interrupts on all cpus even when the load has recently been evaluated by another cpu; i.e. once cpu x has evaluated the load, no other cpu needs to evaluate it again for the next sampling_rate period. Some code is present to avoid the re-evaluation, but we still get the timer interrupts on all cpus.

A good way of avoiding this is to modify the delays of the works for all cpus in the policy (policy->cpus) whenever any one cpu has evaluated the load. This patch makes that change, along with some related code cleanup.

Signed-off-by: Viresh Kumar <[email protected]>
Signed-off-by: Rafael J. Wysocki <[email protected]>
1 parent 9d44592 commit 031299b

4 files changed, 42 insertions(+), 19 deletions(-)
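The commit message above hinges on re-arming the already-pending per-cpu works of every CPU in the policy whenever one of them has just evaluated the load. The following is a minimal, hypothetical sketch of that re-arm pattern, not code from the patch; get_cpu_dwork() is an assumed helper standing in for the governor's per-cpu delayed work lookup:

/* Hypothetical sketch only -- illustrates the re-arm pattern the patch adopts. */
#include <linux/cpumask.h>
#include <linux/workqueue.h>

/* get_cpu_dwork() is an assumed helper returning the cpu's delayed work. */
static void rearm_policy_timers(const struct cpumask *policy_cpus,
                                unsigned long delay)
{
        int cpu;

        /*
         * mod_delayed_work_on() re-arms the timer of an already-pending work,
         * so sibling CPUs whose timers were about to fire are pushed out by a
         * full sampling period instead of raising a redundant interrupt.
         */
        for_each_cpu(cpu, policy_cpus)
                mod_delayed_work_on(cpu, system_wq, get_cpu_dwork(cpu), delay);
}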

drivers/cpufreq/cpufreq_conservative.c
Lines changed: 5 additions & 3 deletions

@@ -107,7 +107,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 
 static void cs_dbs_timer(struct work_struct *work)
 {
-        struct delayed_work *dw = to_delayed_work(work);
         struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
                         struct cs_cpu_dbs_info_s, cdbs.work.work);
         unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
@@ -116,12 +115,15 @@ static void cs_dbs_timer(struct work_struct *work)
         struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
         struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
         int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+        bool modify_all = true;
 
         mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-        if (need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+        if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+                modify_all = false;
+        else
                 dbs_check_cpu(dbs_data, cpu);
 
-        schedule_delayed_work_on(smp_processor_id(), dw, delay);
+        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
         mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
drivers/cpufreq/cpufreq_governor.c
Lines changed: 28 additions & 11 deletions

@@ -178,20 +178,38 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 }
 EXPORT_SYMBOL_GPL(dbs_check_cpu);
 
-static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
-                unsigned int sampling_rate)
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+                unsigned int delay)
 {
-        int delay = delay_for_sampling_rate(sampling_rate);
         struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-        schedule_delayed_work_on(cpu, &cdbs->work, delay);
+        mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
 }
 
-static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+                unsigned int delay, bool all_cpus)
 {
-        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+        int i;
+
+        if (!all_cpus) {
+                __gov_queue_work(smp_processor_id(), dbs_data, delay);
+        } else {
+                for_each_cpu(i, policy->cpus)
+                        __gov_queue_work(i, dbs_data, delay);
+        }
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+                struct cpufreq_policy *policy)
+{
+        struct cpu_dbs_common_info *cdbs;
+        int i;
 
-        cancel_delayed_work_sync(&cdbs->work);
+        for_each_cpu(i, policy->cpus) {
+                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+                cancel_delayed_work_sync(&cdbs->work);
+        }
 }
 
 /* Will return if we need to evaluate cpu load again or not */
@@ -380,16 +398,15 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 /* Initiate timer time stamp */
                 cpu_cdbs->time_stamp = ktime_get();
 
-                for_each_cpu(j, policy->cpus)
-                        dbs_timer_init(dbs_data, j, sampling_rate);
+                gov_queue_work(dbs_data, policy,
+                                delay_for_sampling_rate(sampling_rate), true);
                 break;
 
         case CPUFREQ_GOV_STOP:
                 if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
                         cs_dbs_info->enable = 0;
 
-                for_each_cpu(j, policy->cpus)
-                        dbs_timer_exit(dbs_data, j);
+                gov_cancel_work(dbs_data, policy);
 
                 mutex_lock(&dbs_data->mutex);
                 mutex_destroy(&cpu_cdbs->timer_mutex);
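A note on the helper pair above (my reading, not text from the commit): __gov_queue_work() switches from schedule_delayed_work_on() to mod_delayed_work_on(). schedule_delayed_work_on() leaves an already-pending work untouched, so queuing it from another CPU would not move its timer; mod_delayed_work_on() re-arms the pending timer, which is what lets the all_cpus path push every sibling's next expiry a full sampling period into the future once one CPU has evaluated the load.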

drivers/cpufreq/cpufreq_governor.h
Lines changed: 2 additions & 0 deletions

@@ -262,4 +262,6 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
                 unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 struct common_dbs_data *cdata, unsigned int event);
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+                unsigned int delay, bool all_cpus);
 #endif /* _CPUFREQ_GOVERNER_H */

drivers/cpufreq/cpufreq_ondemand.c
Lines changed: 7 additions & 5 deletions

@@ -216,7 +216,6 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 
 static void od_dbs_timer(struct work_struct *work)
 {
-        struct delayed_work *dw = to_delayed_work(work);
         struct od_cpu_dbs_info_s *dbs_info =
                 container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
         unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
@@ -225,10 +224,13 @@ static void od_dbs_timer(struct work_struct *work)
         struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
         struct od_dbs_tuners *od_tuners = dbs_data->tuners;
         int delay = 0, sample_type = core_dbs_info->sample_type;
+        bool modify_all = true;
 
         mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-        if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate))
+        if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
+                modify_all = false;
                 goto max_delay;
+        }
 
         /* Common NORMAL_SAMPLE setup */
         core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
@@ -250,7 +252,7 @@ static void od_dbs_timer(struct work_struct *work)
                 delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                 * core_dbs_info->rate_mult);
 
-        schedule_delayed_work_on(smp_processor_id(), dw, delay);
+        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
         mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
@@ -310,8 +312,8 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
                 cancel_delayed_work_sync(&dbs_info->cdbs.work);
                 mutex_lock(&dbs_info->cdbs.timer_mutex);
 
-                schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
-                                         usecs_to_jiffies(new_rate));
+                gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+                                usecs_to_jiffies(new_rate), true);
 
         }
         mutex_unlock(&dbs_info->cdbs.timer_mutex);
