@@ -258,11 +258,11 @@ struct pool_workqueue {
 
 	/*
 	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
-	 * and pwq_unbound_release_workfn() for details. pool_workqueue itself
-	 * is also RCU protected so that the first pwq can be determined without
+	 * and pwq_release_workfn() for details. pool_workqueue itself is also
+	 * RCU protected so that the first pwq can be determined without
 	 * grabbing wq->mutex.
 	 */
-	struct kthread_work	unbound_release_work;
+	struct kthread_work	release_work;
 	struct rcu_head		rcu;
 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
 
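For context on the comment above: because each pool_workqueue (and the wq->pwqs list it sits on) is RCU protected, a reader can look at the first pwq without taking wq->mutex. A minimal sketch of that read-side pattern, assuming the RCU list helpers from <linux/rculist.h>; wq_peek_first_pwq() is an illustrative name, not a function added by this patch:

static void wq_peek_first_pwq(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock();
	pwq = list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
				     pwqs_node);
	if (pwq)
		/* valid until rcu_read_unlock(); don't cache it past that */
		pr_debug("first pwq refcnt=%d\n", pwq->refcnt);
	rcu_read_unlock();
}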
@@ -321,7 +321,7 @@ struct workqueue_struct {
 
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
-	struct pool_workqueue __percpu *cpu_pwq; /* I: per-cpu pwqs */
+	struct pool_workqueue __percpu **cpu_pwq; /* I: per-cpu pwqs */
 	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
 };
 
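The type change above is the heart of the patch: each per-CPU slot now holds a pointer to a separately allocated pool_workqueue instead of embedding the structure in percpu storage, so per-cpu pwqs can be refcounted and released the same way unbound ones are. That is why every lookup in the later hunks gains one dereference. A minimal sketch of the lookup, assuming the field shown here; cpu_pwq_lookup() is illustrative, not part of the patch:

static struct pool_workqueue *cpu_pwq_lookup(struct workqueue_struct *wq,
					     int cpu)
{
	/* before the patch this was: return per_cpu_ptr(wq->cpu_pwq, cpu); */
	return *per_cpu_ptr(wq->cpu_pwq, cpu);
}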
@@ -1370,13 +1370,11 @@ static void put_pwq(struct pool_workqueue *pwq)
1370
1370
lockdep_assert_held (& pwq -> pool -> lock );
1371
1371
if (likely (-- pwq -> refcnt ))
1372
1372
return ;
1373
- if (WARN_ON_ONCE (!(pwq -> wq -> flags & WQ_UNBOUND )))
1374
- return ;
1375
1373
/*
1376
1374
* @pwq can't be released under pool->lock, bounce to a dedicated
1377
1375
* kthread_worker to avoid A-A deadlocks.
1378
1376
*/
1379
- kthread_queue_work (pwq_release_worker , & pwq -> unbound_release_work );
1377
+ kthread_queue_work (pwq_release_worker , & pwq -> release_work );
1380
1378
}
1381
1379
1382
1380
/**
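put_pwq() must be called with pool->lock held, which is why destroy_workqueue() below drops references through a locked wrapper. For context, that wrapper in workqueue.c of this era looks roughly like the sketch below (reproduced from memory, so treat the details as approximate):

static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/* pwqs and pools are RCU protected, so this locking is safe */
		raw_spin_lock_irq(&pwq->pool->lock);
		put_pwq(pwq);
		raw_spin_unlock_irq(&pwq->pool->lock);
	}
}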
@@ -1685,7 +1683,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	} else {
 		if (req_cpu == WORK_CPU_UNBOUND)
 			cpu = raw_smp_processor_id();
-		pwq = per_cpu_ptr(wq->cpu_pwq, cpu);
+		pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
 	}
 
 	pool = pwq->pool;
@@ -4004,31 +4002,30 @@ static void rcu_free_pwq(struct rcu_head *rcu)
  * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
  * refcnt and needs to be destroyed.
  */
-static void pwq_unbound_release_workfn(struct kthread_work *work)
+static void pwq_release_workfn(struct kthread_work *work)
 {
 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
-						  unbound_release_work);
+						  release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
 	bool is_last = false;
 
 	/*
-	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * When @pwq is not linked, it doesn't hold any reference to the
 	 * @wq, and @wq is invalid to access.
 	 */
 	if (!list_empty(&pwq->pwqs_node)) {
-		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-			return;
-
 		mutex_lock(&wq->mutex);
 		list_del_rcu(&pwq->pwqs_node);
 		is_last = list_empty(&wq->pwqs);
 		mutex_unlock(&wq->mutex);
 	}
 
-	mutex_lock(&wq_pool_mutex);
-	put_unbound_pool(pool);
-	mutex_unlock(&wq_pool_mutex);
+	if (wq->flags & WQ_UNBOUND) {
+		mutex_lock(&wq_pool_mutex);
+		put_unbound_pool(pool);
+		mutex_unlock(&wq_pool_mutex);
+	}
 
 	call_rcu(&pwq->rcu, rcu_free_pwq);
 
@@ -4112,8 +4109,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4112
4109
INIT_LIST_HEAD (& pwq -> inactive_works );
4113
4110
INIT_LIST_HEAD (& pwq -> pwqs_node );
4114
4111
INIT_LIST_HEAD (& pwq -> mayday_node );
4115
- kthread_init_work (& pwq -> unbound_release_work ,
4116
- pwq_unbound_release_workfn );
4112
+ kthread_init_work (& pwq -> release_work , pwq_release_workfn );
4117
4113
}
4118
4114
4119
4115
/* sync @pwq with the current state of its associated wq and link it */
@@ -4514,20 +4510,25 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	int cpu, ret;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
-		wq->cpu_pwq = alloc_percpu(struct pool_workqueue);
+		wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
 		if (!wq->cpu_pwq)
-			return -ENOMEM;
+			goto enomem;
 
 		for_each_possible_cpu(cpu) {
-			struct pool_workqueue *pwq =
+			struct pool_workqueue **pwq_p =
 				per_cpu_ptr(wq->cpu_pwq, cpu);
-			struct worker_pool *cpu_pools =
-				per_cpu(cpu_worker_pools, cpu);
+			struct worker_pool *pool =
+				&(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
 
-			init_pwq(pwq, wq, &cpu_pools[highpri]);
+			*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
+						       pool->node);
+			if (!*pwq_p)
+				goto enomem;
+
+			init_pwq(*pwq_p, wq, pool);
 
 			mutex_lock(&wq->mutex);
-			link_pwq(pwq);
+			link_pwq(*pwq_p);
 			mutex_unlock(&wq->mutex);
 		}
 		return 0;
@@ -4546,6 +4547,15 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	cpus_read_unlock();
 
 	return ret;
+
+enomem:
+	if (wq->cpu_pwq) {
+		for_each_possible_cpu(cpu)
+			kfree(*per_cpu_ptr(wq->cpu_pwq, cpu));
+		free_percpu(wq->cpu_pwq);
+		wq->cpu_pwq = NULL;
+	}
+	return -ENOMEM;
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
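A note on the enomem path above: percpu areas returned by alloc_percpu() are zero-initialized, so slots the loop never reached are NULL and kfree(NULL) is a no-op, which lets a single label unwind a failure at any point. A distilled, self-contained sketch of the same idiom (illustrative only; alloc_pwq_table() and its use of kzalloc() are assumptions, the real code allocates from pwq_cache):

static int alloc_pwq_table(struct pool_workqueue __percpu ***tblp)
{
	struct pool_workqueue __percpu **tbl;
	int cpu;

	tbl = alloc_percpu(struct pool_workqueue *);
	if (!tbl)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pool_workqueue **slot = per_cpu_ptr(tbl, cpu);

		*slot = kzalloc(sizeof(**slot), GFP_KERNEL);
		if (!*slot)
			goto enomem;
	}

	*tblp = tbl;
	return 0;

enomem:
	for_each_possible_cpu(cpu)
		kfree(*per_cpu_ptr(tbl, cpu));	/* untouched slots are NULL */
	free_percpu(tbl);
	return -ENOMEM;
}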
@@ -4719,7 +4729,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq;
-	int node;
+	int cpu, node;
 
 	/*
 	 * Remove it from sysfs first so that sanity check failure doesn't
@@ -4779,12 +4789,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
4779
4789
mutex_unlock (& wq_pool_mutex );
4780
4790
4781
4791
if (!(wq -> flags & WQ_UNBOUND )) {
4782
- wq_unregister_lockdep (wq );
4783
- /*
4784
- * The base ref is never dropped on per-cpu pwqs. Directly
4785
- * schedule RCU free.
4786
- */
4787
- call_rcu (& wq -> rcu , rcu_free_wq );
4792
+ for_each_possible_cpu (cpu )
4793
+ put_pwq_unlocked (* per_cpu_ptr (wq -> cpu_pwq , cpu ));
4788
4794
} else {
4789
4795
/*
4790
4796
* We're the sole accessor of @wq at this point. Directly
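With this hunk, per-cpu pwqs no longer get a direct call_rcu() from destroy_workqueue(); the base reference is dropped instead and the normal release chain runs: put_pwq() queues pwq_release_workfn(), which ends in call_rcu(&pwq->rcu, rcu_free_pwq). For reference, that RCU callback is along these lines in workqueue.c (unchanged by this patch, quoted from memory):

static void rcu_free_pwq(struct rcu_head *rcu)
{
	kmem_cache_free(pwq_cache,
			container_of(rcu, struct pool_workqueue, rcu));
}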
@@ -4901,7 +4907,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 		cpu = smp_processor_id();
 
 	if (!(wq->flags & WQ_UNBOUND))
-		pwq = per_cpu_ptr(wq->cpu_pwq, cpu);
+		pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
 	else
 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 