
Commit 0219a35

workqueue: Factor out need_more_worker() check and worker wake-up
Checking need_more_worker() and calling wake_up_worker() is a repeated
pattern. Let's add kick_pool(), which checks need_more_worker() and
open-codes wake_up_worker(), and replace wake_up_worker() uses. The
following conversions aren't one-to-one:

* __queue_work() was using __need_more_worker() because it knows that
  pool->worklist isn't empty. Switching to kick_pool() adds an extra
  list_empty() test.

* create_worker() always needs to wake up the newly minted worker, whether
  there's more work to do or not, to avoid triggering the hung task check
  on the new task. Keep the current wake_up_process() and still add
  kick_pool(). This may lead to an extra wakeup, which isn't harmful.

* pwq_adjust_max_active() was explicitly checking whether it needed to wake
  up a worker, to avoid spurious wakeups. As kick_pool() only wakes up a
  worker when necessary, this explicit check is no longer needed and is
  dropped.

* unbind_workers() now calls kick_pool() instead of wake_up_worker(), adding
  a need_more_worker() test. This avoids spurious wakeups and shouldn't
  break anything.

wake_up_worker() is dropped as kick_pool() replaces all its users. After
this patch, all paths that wake up a non-rescuer worker to initiate work
item execution use kick_pool(). This will enable future changes to improve
locality.

Signed-off-by: Tejun Heo <[email protected]>
Parent: 873eaca
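To make the refactor concrete, below is a minimal, self-contained userspace model of the pattern being factored out. It is an illustration only: struct pool, nr_pending, nr_idle and main() are simplified stand-ins for the kernel's worker_pool machinery, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct worker_pool: just the state kick_pool() looks at. */
struct pool {
	int nr_pending;		/* models !list_empty(&pool->worklist) */
	int nr_running;		/* models pool->nr_running */
	int nr_idle;		/* models first_idle_worker() finding a worker */
};

/* Need to wake up a worker? Work is pending and nobody is running. */
static bool need_more_worker(struct pool *pool)
{
	return pool->nr_pending > 0 && pool->nr_running == 0;
}

/*
 * kick_pool(): check need_more_worker() and wake one idle worker,
 * reporting whether a wakeup actually happened.
 */
static bool kick_pool(struct pool *pool)
{
	if (!need_more_worker(pool) || pool->nr_idle == 0)
		return false;
	pool->nr_idle--;
	pool->nr_running++;	/* the woken worker starts running */
	return true;
}

int main(void)
{
	struct pool pool = { .nr_pending = 1, .nr_running = 0, .nr_idle = 2 };

	/* First kick wakes a worker; the second is a noop since one is running. */
	printf("kicked: %d\n", kick_pool(&pool));	/* prints 1 */
	printf("kicked: %d\n", kick_pool(&pool));	/* prints 0 */
	return 0;
}

Call sites that previously open-coded "if (need_more_worker(pool)) wake_up_worker(pool);" collapse to a single kick_pool(pool) call, and the boolean return is what lets wq_worker_sleeping() and wq_worker_tick() bump PWQ_STAT_CM_WAKEUP only when a wakeup actually occurred.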

kernel/workqueue.c

Lines changed: 37 additions & 50 deletions
@@ -815,11 +815,6 @@ static bool work_is_canceling(struct work_struct *work)
  * they're being called with pool->lock held.
  */
 
-static bool __need_more_worker(struct worker_pool *pool)
-{
-	return !pool->nr_running;
-}
-
 /*
  * Need to wake up a worker? Called from anything but currently
  * running workers.
@@ -830,7 +825,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  */
 static bool need_more_worker(struct worker_pool *pool)
 {
-	return !list_empty(&pool->worklist) && __need_more_worker(pool);
+	return !list_empty(&pool->worklist) && !pool->nr_running;
 }
 
 /* Can I start working? Called from busy but !running workers. */
@@ -1100,20 +1095,23 @@ static bool assign_work(struct work_struct *work, struct worker *worker,
 }
 
 /**
- * wake_up_worker - wake up an idle worker
- * @pool: worker pool to wake worker from
- *
- * Wake up the first idle worker of @pool.
+ * kick_pool - wake up an idle worker if necessary
+ * @pool: pool to kick
  *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
+ * @pool may have pending work items. Wake up worker if necessary. Returns
+ * whether a worker was woken up.
  */
-static void wake_up_worker(struct worker_pool *pool)
+static bool kick_pool(struct worker_pool *pool)
 {
 	struct worker *worker = first_idle_worker(pool);
 
-	if (likely(worker))
-		wake_up_process(worker->task);
+	lockdep_assert_held(&pool->lock);
+
+	if (!need_more_worker(pool) || !worker)
+		return false;
+
+	wake_up_process(worker->task);
+	return true;
 }
 
 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
@@ -1281,10 +1279,9 @@ void wq_worker_sleeping(struct task_struct *task)
 	}
 
 	pool->nr_running--;
-	if (need_more_worker(pool)) {
+	if (kick_pool(pool))
 		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
-		wake_up_worker(pool);
-	}
+
 	raw_spin_unlock_irq(&pool->lock);
 }
 
@@ -1332,10 +1329,8 @@ void wq_worker_tick(struct task_struct *task)
 	wq_cpu_intensive_report(worker->current_func);
 	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
 
-	if (need_more_worker(pool)) {
+	if (kick_pool(pool))
 		pwq->stats[PWQ_STAT_CM_WAKEUP]++;
-		wake_up_worker(pool);
-	}
 
 	raw_spin_unlock(&pool->lock);
 }
@@ -1773,9 +1768,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		trace_workqueue_activate_work(work);
 		pwq->nr_active++;
 		insert_work(pwq, work, &pool->worklist, work_flags);
-
-		if (__need_more_worker(pool))
-			wake_up_worker(pool);
+		kick_pool(pool);
 	} else {
 		work_flags |= WORK_STRUCT_INACTIVE;
 		insert_work(pwq, work, &pwq->inactive_works, work_flags);
@@ -2181,9 +2174,18 @@ static struct worker *create_worker(struct worker_pool *pool)
 
 	/* start the newly created worker */
 	raw_spin_lock_irq(&pool->lock);
+
 	worker->pool->nr_workers++;
 	worker_enter_idle(worker);
+	kick_pool(pool);
+
+	/*
+	 * @worker is waiting on a completion in kthread() and will trigger hung
+	 * check if not woken up soon. As kick_pool() might not have waken it
+	 * up, wake it up explicitly once more.
+	 */
 	wake_up_process(worker->task);
+
 	raw_spin_unlock_irq(&pool->lock);
 
 	return worker;
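Why create_worker() keeps the explicit wake_up_process() can be seen in the same style of simplified model (stand-in types again, not kernel code): a freshly created worker enters idle while the pool may have nothing pending, so kick_pool() legitimately declines to wake it, yet the new task must still be woken to get past its kthread() completion.

#include <stdbool.h>
#include <stdio.h>

struct pool { int nr_pending, nr_running, nr_idle; };

/* Same simplified kick_pool() as in the earlier sketch. */
static bool kick_pool(struct pool *pool)
{
	if (!(pool->nr_pending > 0 && pool->nr_running == 0) ||
	    pool->nr_idle == 0)
		return false;
	pool->nr_idle--;
	pool->nr_running++;
	return true;
}

int main(void)
{
	/* A new worker just entered idle, but nothing is queued. */
	struct pool pool = { .nr_pending = 0, .nr_running = 0, .nr_idle = 1 };

	if (!kick_pool(&pool))
		puts("noop: the explicit wake_up_process() is what avoids the hung task check");
	return 0;
}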
@@ -2545,14 +2547,12 @@ __acquires(&pool->lock)
 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/*
-	 * Wake up another worker if necessary. The condition is always
-	 * false for normal per-cpu workers since nr_running would always
-	 * be >= 1 at this point. This is used to chain execution of the
-	 * pending work items for WORKER_NOT_RUNNING workers such as the
-	 * UNBOUND and CPU_INTENSIVE ones.
+	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
+	 * since nr_running would always be >= 1 at this point. This is used to
+	 * chain execution of the pending work items for WORKER_NOT_RUNNING
+	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
 	 */
-	if (need_more_worker(pool))
-		wake_up_worker(pool);
+	kick_pool(pool);
 
 	/*
 	 * Record the last pool and clear PENDING which should be the last
@@ -2872,12 +2872,10 @@ static int rescuer_thread(void *__rescuer)
 			put_pwq(pwq);
 
 			/*
-			 * Leave this pool. If need_more_worker() is %true, notify a
-			 * regular worker; otherwise, we end up with 0 concurrency
-			 * and stalling the execution.
+			 * Leave this pool. Notify regular workers; otherwise, we end up
+			 * with 0 concurrency and stalling the execution.
 			 */
-			if (need_more_worker(pool))
-				wake_up_worker(pool);
+			kick_pool(pool);
 
 			raw_spin_unlock_irq(&pool->lock);
 
@@ -4111,24 +4109,13 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	 * is updated and visible.
 	 */
 	if (!freezable || !workqueue_freezing) {
-		bool kick = false;
-
 		pwq->max_active = wq->saved_max_active;
 
 		while (!list_empty(&pwq->inactive_works) &&
-		       pwq->nr_active < pwq->max_active) {
+		       pwq->nr_active < pwq->max_active)
 			pwq_activate_first_inactive(pwq);
-			kick = true;
-		}
 
-		/*
-		 * Need to kick a worker after thawed or an unbound wq's
-		 * max_active is bumped. In realtime scenarios, always kicking a
-		 * worker will cause interference on the isolated cpu cores, so
-		 * let's kick iff work items were activated.
-		 */
-		if (kick)
-			wake_up_worker(pwq->pool);
+		kick_pool(pwq->pool);
 	} else {
 		pwq->max_active = 0;
 	}
@@ -5389,7 +5376,7 @@ static void unbind_workers(int cpu)
 		 * worker blocking could lead to lengthy stalls. Kick off
 		 * unbound chain execution of currently pending work items.
 		 */
-		wake_up_worker(pool);
+		kick_pool(pool);
 
 		raw_spin_unlock_irq(&pool->lock);
 
