 #include <linux/lockdep.h>
 
+static void rcu_exp_handler(void *unused);
+
 /*
  * Record the start of an expedited grace period.
  */
@@ -344,15 +346,13 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 {
        int cpu;
        unsigned long flags;
-       smp_call_func_t func;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
 
-       func = rewp->rew_func;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
        /* Each pass checks a CPU for identity, offline, and idle. */
@@ -396,7 +396,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
                        mask_ofl_test |= mask;
                        continue;
                }
-               ret = smp_call_function_single(cpu, func, NULL, 0);
+               ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                if (!ret) {
                        mask_ofl_ipi &= ~mask;
                        continue;
@@ -426,7 +426,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
  * Select the nodes that the upcoming expedited grace period needs
  * to wait for.
  */
-static void sync_rcu_exp_select_cpus(smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(void)
 {
        int cpu;
        struct rcu_node *rnp;
@@ -440,7 +440,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
-               rnp->rew.rew_func = func;
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
@@ -580,10 +579,10 @@ static void rcu_exp_wait_wake(unsigned long s)
  * Common code to drive an expedited grace period forward, used by
  * workqueues and mid-boot-time tasks.
  */
-static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(unsigned long s)
 {
        /* Initialize the rcu_node tree in preparation for the wait. */
-       sync_rcu_exp_select_cpus(func);
+       sync_rcu_exp_select_cpus();
 
        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
@@ -597,14 +596,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
        struct rcu_exp_work *rewp;
 
        rewp = container_of(wp, struct rcu_exp_work, rew_work);
-       rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
+       rcu_exp_sel_wait_wake(rewp->rew_s);
 }
 
 /*
  * Given a smp_call_function() handler, kick off the specified
  * implementation of expedited grace period.
  */
-static void _synchronize_rcu_expedited(smp_call_func_t func)
+static void _synchronize_rcu_expedited(void)
 {
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
@@ -625,10 +624,9 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
-               rcu_exp_sel_wait_wake(func, s);
+               rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
-               rew.rew_func = func;
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
@@ -654,7 +652,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
  * ->expmask fields in the rcu_node tree. Otherwise, immediately
  * report the quiescent state.
  */
-static void sync_rcu_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
 {
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -760,14 +758,14 @@ void synchronize_rcu_expedited(void)
 
        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
-       _synchronize_rcu_expedited(sync_rcu_exp_handler);
+       _synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
 {
        struct rcu_data *rdp;
        struct rcu_node *rnp;
@@ -799,7 +797,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
-       ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
+       ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        WARN_ON_ONCE(ret);
 }
 
@@ -835,7 +833,7 @@ void synchronize_rcu_expedited(void)
        if (rcu_blocking_is_gp())
                return;
 
-       _synchronize_rcu_expedited(sync_sched_exp_handler);
+       _synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
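Note: the net effect of this diff is that the expedited-grace-period IPI handler is no longer threaded through the call chain as an smp_call_func_t. A single name, rcu_exp_handler(), is forward-declared once and then defined per RCU flavor on either side of #ifdef CONFIG_PREEMPT_RCU, so callers such as sync_rcu_exp_select_node_cpus() name the handler directly and the choice is made at build time. Below is a minimal, self-contained C sketch of that pattern; the names (DEMO_PREEMPT, demo_handler, demo_kick) are hypothetical and only the shape mirrors the kernel change, not its logic.

#include <stdio.h>

/* Forward declaration: common code can name the handler before the
 * flavor-specific definition appears, as the patch does for rcu_exp_handler(). */
static void demo_handler(void *unused);

/* Common code calls the handler by name; no function pointer is passed in. */
static void demo_kick(void)
{
        demo_handler(NULL);
}

#ifdef DEMO_PREEMPT
/* "Preemptible" flavor of the handler (hypothetical stand-in). */
static void demo_handler(void *unused)
{
        (void)unused;
        puts("preemptible-flavor handler");
}
#else
/* "Non-preemptible" flavor of the handler (hypothetical stand-in). */
static void demo_handler(void *unused)
{
        (void)unused;
        puts("non-preemptible-flavor handler");
}
#endif

int main(void)
{
        demo_kick();    /* which handler runs was decided at compile time */
        return 0;
}

Building with or without -DDEMO_PREEMPT selects the flavor, much as CONFIG_PREEMPT_RCU selects which rcu_exp_handler() definition is compiled into the kernel.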