@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 						  pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
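The hunk above replaces the up/down notifier pair with three plain hotplug callbacks: workqueue_prepare_cpu() creates the per-cpu workers before the CPU is brought up, and workqueue_online_cpu()/workqueue_offline_cpu() form a startup/teardown pair that rebinds and unbinds the workers. In this patch the callbacks are wired into the CPU hotplug state machine elsewhere (kernel/cpu.c, not part of this hunk). Purely as an illustrative sketch, the same callbacks could be registered through the dynamic cpuhp API as below; the init function name is hypothetical and the choice of dynamic state constants is an assumption, not what this patch does:

#include <linux/cpuhotplug.h>	/* cpuhp_setup_state(), CPUHP_* state constants */
#include <linux/init.h>
#include <linux/workqueue.h>

/* Hypothetical registration helper; the patch itself uses static state tables. */
static int __init example_wq_hotplug_init(void)
{
	int ret;

	/*
	 * Prepare stage: runs on a control CPU before the new CPU is brought
	 * up, matching the old CPU_UP_PREPARE notifier case.  Assumes a kernel
	 * that provides the CPUHP_BP_PREPARE_DYN dynamic range.
	 */
	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "workqueue:prepare",
				workqueue_prepare_cpu, NULL);
	if (ret < 0)
		return ret;

	/*
	 * Online stage: startup runs once the CPU is online (old CPU_ONLINE),
	 * teardown runs when it goes down (old CPU_DOWN_PREPARE).  For dynamic
	 * states cpuhp_setup_state() returns the allocated state number on
	 * success, so only a negative value is an error.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "workqueue:online",
				workqueue_online_cpu, workqueue_offline_cpu);
	return ret < 0 ? ret : 0;
}
early_initcall(example_wq_hotplug_init);	/* for the sketch only */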
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
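With the notifier registration dropped from init_workqueues(), the hotplug core has to call the three callbacks directly, which is why they are no longer static. A minimal sketch of the prototypes the hotplug code needs to see, assuming (as in the companion header change, which is not shown in this diff) they are exposed from include/linux/workqueue.h only on SMP builds:

/* Sketch of the declarations the hotplug core would include (assumed location). */
#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);	/* before the CPU comes up */
int workqueue_online_cpu(unsigned int cpu);	/* once the CPU is online */
int workqueue_offline_cpu(unsigned int cpu);	/* while the CPU goes down */
#endif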