
Commit fe089f8

workqueue: Not all work insertion needs to wake up a worker
insert_work() always tried to wake up a worker; however, the only time it needs to try to wake up a worker is when a new active work item is queued. When a work item goes on the inactive list or when a flush work item is queued, there's no reason to try to wake up a worker.

This patch moves the worker wakeup logic out of insert_work() and places it in the active new work item queueing path in __queue_work().

While at it:

* __queue_work() is dereferencing pwq->pool repeatedly. Add the local variable pool.

* Every caller of insert_work() calls debug_work_activate(). Consolidate the invocations into insert_work().

* In __queue_work(), the pool->watchdog_ts update is relocated slightly. This is to better accommodate future changes.

This makes wakeups more precise and will help the planned change to assign work items to workers before waking them up.

No behavior changes intended.

v2: WARN_ON_ONCE(pool != last_pool) added in __queue_work() to clarify, as suggested by Lai.

Signed-off-by: Tejun Heo <[email protected]>
Cc: Lai Jiangshan <[email protected]>
1 parent: c0ab017 · commit: fe089f8
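As orientation before the diff, here is a condensed sketch of how the two functions fit together after this patch. It is not the verbatim kernel code: pwq selection, the retry loop, locking, the cross-pool non-reentrancy handling, and the KASAN/tracepoint calls are elided, but the identifiers (insert_work(), __queue_work(), __need_more_worker(), wake_up_worker(), and the fields they touch) are the ones from the patch itself.

/*
 * Condensed sketch (not the verbatim kernel code): insert_work() just links
 * the work item and records debug state; __queue_work() decides where the
 * item goes and wakes a worker only on the new-active-work path.
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	debug_work_activate(work);	/* consolidated here from all callers */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);
	/* no wake_up_worker() here any more */
}

static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *pool;
	unsigned int work_flags;

	/* ... pwq selection, retry loop, locking and reentrancy checks elided ... */
	pool = pwq->pool;	/* new local, replaces repeated pwq->pool derefs */
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		/* new active work item: the only case that may need a wakeup */
		if (list_empty(&pool->worklist))
			pool->watchdog_ts = jiffies;
		pwq->nr_active++;
		insert_work(pwq, work, &pool->worklist, work_flags);

		if (__need_more_worker(pool))
			wake_up_worker(pool);
	} else {
		/* over the max_active limit: park on the inactive list, no wakeup */
		insert_work(pwq, work, &pwq->inactive_works,
			    work_flags | WORK_STRUCT_INACTIVE);
	}
}

The design point, as the commit message notes, is that a wakeup is only ever needed when a new active work item is queued; work going onto the inactive list and the flush barriers queued via insert_wq_barrier() don't need one, so insert_work() can stop waking workers without any behavior change.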

File tree: 1 file changed (+19, -19 lines)


kernel/workqueue.c

Lines changed: 19 additions & 19 deletions
@@ -1542,7 +1542,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 			struct list_head *head, unsigned int extra_flags)
 {
-	struct worker_pool *pool = pwq->pool;
+	debug_work_activate(work);
 
 	/* record the work call stack in order to print it in KASAN reports */
 	kasan_record_aux_stack_noalloc(work);
@@ -1551,9 +1551,6 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
 	set_work_pwq(work, pwq, extra_flags);
 	list_add_tail(&work->entry, head);
 	get_pwq(pwq);
-
-	if (__need_more_worker(pool))
-		wake_up_worker(pool);
 }
 
 /*
@@ -1607,8 +1604,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
-	struct worker_pool *last_pool;
-	struct list_head *worklist;
+	struct worker_pool *last_pool, *pool;
 	unsigned int work_flags;
 	unsigned int req_cpu = cpu;
 
@@ -1642,13 +1638,15 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
 	}
 
+	pool = pwq->pool;
+
 	/*
 	 * If @work was previously on a different pool, it might still be
 	 * running there, in which case the work needs to be queued on that
 	 * pool to guarantee non-reentrancy.
 	 */
 	last_pool = get_work_pool(work);
-	if (last_pool && last_pool != pwq->pool) {
+	if (last_pool && last_pool != pool) {
 		struct worker *worker;
 
 		raw_spin_lock(&last_pool->lock);
@@ -1657,13 +1655,15 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 
 		if (worker && worker->current_pwq->wq == wq) {
 			pwq = worker->current_pwq;
+			pool = pwq->pool;
+			WARN_ON_ONCE(pool != last_pool);
 		} else {
 			/* meh... not running there, queue here */
 			raw_spin_unlock(&last_pool->lock);
-			raw_spin_lock(&pwq->pool->lock);
+			raw_spin_lock(&pool->lock);
 		}
 	} else {
-		raw_spin_lock(&pwq->pool->lock);
+		raw_spin_lock(&pool->lock);
 	}
 
 	/*
@@ -1676,7 +1676,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 */
 	if (unlikely(!pwq->refcnt)) {
 		if (wq->flags & WQ_UNBOUND) {
-			raw_spin_unlock(&pwq->pool->lock);
+			raw_spin_unlock(&pool->lock);
 			cpu_relax();
 			goto retry;
 		}
@@ -1695,21 +1695,22 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	work_flags = work_color_to_flags(pwq->work_color);
 
 	if (likely(pwq->nr_active < pwq->max_active)) {
+		if (list_empty(&pool->worklist))
+			pool->watchdog_ts = jiffies;
+
 		trace_workqueue_activate_work(work);
 		pwq->nr_active++;
-		worklist = &pwq->pool->worklist;
-		if (list_empty(worklist))
-			pwq->pool->watchdog_ts = jiffies;
+		insert_work(pwq, work, &pool->worklist, work_flags);
+
+		if (__need_more_worker(pool))
+			wake_up_worker(pool);
 	} else {
 		work_flags |= WORK_STRUCT_INACTIVE;
-		worklist = &pwq->inactive_works;
+		insert_work(pwq, work, &pwq->inactive_works, work_flags);
 	}
 
-	debug_work_activate(work);
-	insert_work(pwq, work, worklist, work_flags);
-
 out:
-	raw_spin_unlock(&pwq->pool->lock);
+	raw_spin_unlock(&pool->lock);
 	rcu_read_unlock();
 }
 
@@ -3012,7 +3013,6 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	pwq->nr_in_flight[work_color]++;
 	work_flags |= work_color_to_flags(work_color);
 
-	debug_work_activate(&barr->work);
 	insert_work(pwq, &barr->work, head, work_flags);
 }
 