@@ -2497,15 +2497,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
-	/*
-	 * Explicitly init the crosslock for wq_barrier::done, make its lock
-	 * key a subkey of the corresponding work. As a result we won't
-	 * build a dependency between wq_barrier::done and unrelated work.
-	 */
-	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-				   "(complete)wq_barr::done",
-				   target->lockdep_map.key, 1);
-	__init_completion(&barr->done);
+	init_completion_map(&barr->done, &target->lockdep_map);
+
 	barr->task = current;
 
 	/*
@@ -2611,16 +2604,13 @@ void flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
 	};
 	int next_color;
 
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
-
 	mutex_lock(&wq->mutex);
 
 	/*
@@ -2883,9 +2873,6 @@ bool flush_work(struct work_struct *work)
 	if (WARN_ON(!wq_online))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
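
For context, a minimal sketch of the pattern the hunks above adopt: an on-stack completion is initialized against an existing lockdep_map via init_completion_map(), so the later wait_for_completion() is tracked against that map instead of an unrelated lock class. The struct my_obj and my_flush() names below are invented for illustration; only init_completion_map() and wait_for_completion() come from the diff.

/*
 * Hypothetical sketch only; "struct my_obj" and my_flush() do not exist
 * in the kernel. It mirrors insert_wq_barrier()/flush_workqueue() above:
 * the on-stack completion borrows the lockdep map of the object being
 * flushed, so lockdep sees the wait as a dependency on that object.
 */
#include <linux/completion.h>
#include <linux/lockdep.h>

struct my_obj {
	struct lockdep_map	lockdep_map;
	/* ... state of whatever is being flushed ... */
};

static void my_flush(struct my_obj *obj)
{
	struct completion done;

	/* Key the completion to obj's lockdep map instead of a new class. */
	init_completion_map(&done, &obj->lockdep_map);

	/* ... hand &done to the context that will call complete(&done) ... */

	wait_for_completion(&done);
}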