
Commit 873eaca

workqueue: Factor out work to worker assignment and collision handling
The two work execution paths in worker_thread() and rescuer_thread() use move_linked_works() to claim work items from @pool->worklist. Once claimed, process_scheduled_works() is called, which invokes process_one_work() on each work item. process_one_work() then uses find_worker_executing_work() to detect and handle collisions - situations where the work item to be executed is still running on another worker.

This works fine, but, to improve work execution locality, we want to establish the work to worker association earlier and know for sure that the worker is going to execute the work once assigned, which requires performing collision handling earlier, while trying to assign the work item to the worker.

This patch introduces assign_work(), which assigns a work item to a worker using move_linked_works() and then performs collision handling. As collisions are now handled earlier, process_one_work() no longer needs to worry about them.

After this patch, collision checks for linked work items are skipped, which should be fine as they can't be queued multiple times concurrently. For work items running from rescuers, the timing of collision handling may change, but the invariant that work items go through collision handling before starting execution does not.

This patch shouldn't cause noticeable behavior changes, especially given that worker_thread()'s behavior remains the same.

Signed-off-by: Tejun Heo <[email protected]>
1 parent 63c5484 commit 873eaca
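
To illustrate the rule assign_work() enforces, here is a minimal user-space C sketch (not part of this commit) of the collision-handling idea: a work item already being executed by a worker in the same pool is punted to that worker instead of being picked up again. The pool/worker structures, the fixed-size scheduled array, and the move_to_scheduled() helper are simplified stand-ins for this sketch, not the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct work { int id; };

struct worker {
	struct work *current_work;	/* work this worker is executing now */
	struct work *scheduled[8];	/* stand-in for the ->scheduled list */
	int nr_scheduled;
};

struct pool {
	struct worker *workers[4];
	int nr_workers;
};

/* stand-in for find_worker_executing_work(): scan the pool's busy workers */
static struct worker *find_worker_executing_work(struct pool *pool,
						 struct work *work)
{
	for (int i = 0; i < pool->nr_workers; i++)
		if (pool->workers[i]->current_work == work)
			return pool->workers[i];
	return NULL;
}

/* stand-in for move_linked_works(): append @work to @worker's queue */
static void move_to_scheduled(struct work *work, struct worker *worker)
{
	worker->scheduled[worker->nr_scheduled++] = work;
}

/*
 * Modeled after assign_work(): returns true if @worker got the work, false
 * if the work was punted to the worker already executing it.
 */
static bool assign_work(struct pool *pool, struct work *work,
			struct worker *worker)
{
	struct worker *collision = find_worker_executing_work(pool, work);

	if (collision) {
		move_to_scheduled(work, collision);
		return false;
	}

	move_to_scheduled(work, worker);
	return true;
}

int main(void)
{
	struct work w = { .id = 1 };
	struct worker busy = { .current_work = &w };
	struct worker idle = { 0 };
	struct pool pool = { .workers = { &busy, &idle }, .nr_workers = 2 };

	/* w is still running on "busy", so "idle" must not pick it up */
	printf("assigned to idle: %s\n",
	       assign_work(&pool, &w, &idle) ? "yes" : "no, punted to busy");
	return 0;
}

In the actual patch, worker_thread() and rescuer_thread() make exactly this check before calling process_scheduled_works(), which is why process_one_work() can drop its own collision check.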


kernel/workqueue.c

Lines changed: 52 additions & 28 deletions
@@ -1025,13 +1025,10 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
  * @head: target list to append @work to
  * @nextp: out parameter for nested worklist walking
  *
- * Schedule linked works starting from @work to @head. Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work. This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
+ * Schedule linked works starting from @work to @head. Work series to be
+ * scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
+ * @nextp.
  *
  * CONTEXT:
  * raw_spin_lock_irq(pool->lock).
@@ -1060,6 +1057,48 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
 	*nextp = n;
 }
 
+/**
+ * assign_work - assign a work item and its linked work items to a worker
+ * @work: work to assign
+ * @worker: worker to assign to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Assign @work and its linked work items to @worker. If @work is already being
+ * executed by another worker in the same pool, it'll be punted there.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of the last
+ * scheduled work. This allows assign_work() to be nested inside
+ * list_for_each_entry_safe().
+ *
+ * Returns %true if @work was successfully assigned to @worker. %false if @work
+ * was punted to another worker already executing it.
+ */
+static bool assign_work(struct work_struct *work, struct worker *worker,
+			struct work_struct **nextp)
+{
+	struct worker_pool *pool = worker->pool;
+	struct worker *collision;
+
+	lockdep_assert_held(&pool->lock);
+
+	/*
+	 * A single work shouldn't be executed concurrently by multiple workers.
+	 * __queue_work() ensures that @work doesn't jump to a different pool
+	 * while still running in the previous pool. Here, we should ensure that
+	 * @work is not executed concurrently by multiple workers from the same
+	 * pool. Check whether anyone is already processing the work. If so,
+	 * defer the work to the currently executing one.
+	 */
+	collision = find_worker_executing_work(pool, work);
+	if (unlikely(collision)) {
+		move_linked_works(work, &collision->scheduled, nextp);
+		return false;
+	}
+
+	move_linked_works(work, &worker->scheduled, nextp);
+	return true;
+}
+
 /**
  * wake_up_worker - wake up an idle worker
  * @pool: worker pool to wake worker from
@@ -2462,7 +2501,6 @@ __acquires(&pool->lock)
 	struct pool_workqueue *pwq = get_work_pwq(work);
 	struct worker_pool *pool = worker->pool;
 	unsigned long work_data;
-	struct worker *collision;
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * It is permissible to free the struct work_struct from
@@ -2479,18 +2517,6 @@ __acquires(&pool->lock)
 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
 		     raw_smp_processor_id() != pool->cpu);
 
-	/*
-	 * A single work shouldn't be executed concurrently by
-	 * multiple workers on a single cpu. Check whether anyone is
-	 * already processing the work. If so, defer the work to the
-	 * currently executing one.
-	 */
-	collision = find_worker_executing_work(pool, work);
-	if (unlikely(collision)) {
-		move_linked_works(work, &collision->scheduled, NULL);
-		return;
-	}
-
 	/* claim and dequeue */
 	debug_work_deactivate(work);
 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
@@ -2717,8 +2743,8 @@ static int worker_thread(void *__worker)
 				list_first_entry(&pool->worklist,
 						 struct work_struct, entry);
 
-			move_linked_works(work, &worker->scheduled, NULL);
-			process_scheduled_works(worker);
+			if (assign_work(work, worker, NULL))
+				process_scheduled_works(worker);
 		} while (keep_working(pool));
 
 		worker_set_flags(worker, WORKER_PREP);
@@ -2762,7 +2788,6 @@ static int rescuer_thread(void *__rescuer)
 {
 	struct worker *rescuer = __rescuer;
 	struct workqueue_struct *wq = rescuer->rescue_wq;
-	struct list_head *scheduled = &rescuer->scheduled;
 	bool should_stop;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -2807,15 +2832,14 @@ static int rescuer_thread(void *__rescuer)
 		 * Slurp in all works issued via this workqueue and
 		 * process'em.
 		 */
-		WARN_ON_ONCE(!list_empty(scheduled));
+		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
-			if (get_work_pwq(work) == pwq) {
-				move_linked_works(work, scheduled, &n);
+			if (get_work_pwq(work) == pwq &&
+			    assign_work(work, rescuer, &n))
 				pwq->stats[PWQ_STAT_RESCUED]++;
-			}
 		}
 
-		if (!list_empty(scheduled)) {
+		if (!list_empty(&rescuer->scheduled)) {
 			process_scheduled_works(rescuer);
 
 			/*
