
Commit d6e8978

jmberg-intel authored and tejun committed
workqueue: skip lockdep wq dependency in cancel_work_sync()
In cancel_work_sync(), we can only have one of two cases, even
with an ordered workqueue:

 * the work isn't running, just cancelled before it started
 * the work is running, but then nothing else can be on the
   workqueue before it

Thus, we need to skip the lockdep workqueue dependency handling,
otherwise we get false positive reports from lockdep saying that
we have a potential deadlock when the workqueue also has other
work items with locking, e.g.

  work1_function() { mutex_lock(&mutex); ... }
  work2_function() { /* nothing */ }

  other_function() {
    queue_work(ordered_wq, &work1);
    queue_work(ordered_wq, &work2);
    mutex_lock(&mutex);
    cancel_work_sync(&work2);
  }

As described above, this isn't a problem, but lockdep will
currently flag it as if cancel_work_sync() was flush_work(),
which *is* a problem.

Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
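For reference, here is the scenario from the commit message fleshed out as a self-contained, kernel-style C sketch. The declarations of ordered_wq, mutex and the two work items are assumptions added for completeness; the commit message only shows the function bodies:

#include <linux/workqueue.h>
#include <linux/mutex.h>

/* Assumed setup, matching the commit message: an ordered workqueue
 * (ordered_wq would come from alloc_ordered_workqueue() elsewhere)
 * and a mutex shared with work1. */
static struct workqueue_struct *ordered_wq;
static DEFINE_MUTEX(mutex);

static void work1_function(struct work_struct *work)
{
	mutex_lock(&mutex);
	/* ... */
	mutex_unlock(&mutex);
}

static void work2_function(struct work_struct *work)
{
	/* nothing */
}

static DECLARE_WORK(work1, work1_function);
static DECLARE_WORK(work2, work2_function);

static void other_function(void)
{
	queue_work(ordered_wq, &work1);
	queue_work(ordered_wq, &work2);
	mutex_lock(&mutex);
	/*
	 * Safe: work2 was either cancelled before it started, or it is
	 * already running, in which case nothing else can be ahead of
	 * it on the ordered workqueue.  Before this fix, lockdep
	 * treated this like flush_work(&work2) and reported a
	 * false-positive mutex -> workqueue -> mutex deadlock.
	 */
	cancel_work_sync(&work2);
	mutex_unlock(&mutex);
}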
1 parent 66448bc commit d6e8978

File tree: 1 file changed, +22 -15 lines changed


kernel/workqueue.c

Lines changed: 22 additions & 15 deletions
@@ -2843,7 +2843,8 @@ void drain_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool from_cancel)
 {
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
@@ -2885,7 +2886,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	 * workqueues the deadlock happens when the rescuer stalls, blocking
 	 * forward progress.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
+	if (!from_cancel &&
+	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
 		lock_map_acquire(&pwq->wq->lockdep_map);
 		lock_map_release(&pwq->wq->lockdep_map);
 	}
@@ -2896,6 +2898,22 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work, bool from_cancel)
+{
+	struct wq_barrier barr;
+
+	if (WARN_ON(!wq_online))
+		return false;
+
+	if (start_flush_work(work, &barr, from_cancel)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2909,18 +2927,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
-	if (WARN_ON(!wq_online))
-		return false;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work, false);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -2986,7 +2993,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	 * isn't executing.
	 */
 	if (wq_online)
-		flush_work(work);
+		__flush_work(work, true);
 
 	clear_work_data(work);
 
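For context on the check that the new from_cancel argument bypasses: start_flush_work() records a flush-waits-on-workqueue dependency for lockdep using an empty acquire/release pair on the workqueue's lockdep_map. A minimal sketch of that idiom, with a hypothetical helper name (the real code operates on pwq->wq->lockdep_map inline, as shown in the diff above):

#include <linux/lockdep.h>

/*
 * Hypothetical helper illustrating the annotation idiom used in
 * start_flush_work(): the acquire/release pair takes no real lock, it
 * only tells lockdep "the current context may wait on this map", so a
 * cycle such as mutex -> wq -> mutex can be flagged later.  Passing
 * from_cancel == true skips this recording, because cancel_work_sync()
 * never waits for other items on the same workqueue.
 */
static void note_may_wait_on(struct lockdep_map *map)
{
	lock_map_acquire(map);
	lock_map_release(map);
}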