Commit 142d106

Authored and committed by Paul E. McKenney
rcu: Determine expedited-GP IPI handler at build time
Back when there could be multiple RCU flavors running in the same kernel at the same time, it was necessary to specify the expedited grace-period IPI handler at runtime. Now that there is only one RCU flavor, the IPI handler can be determined at build time. There is therefore no longer any reason for the RCU-preempt and RCU-sched IPI handlers to have different names, nor is there any reason to pass these handlers in function arguments and in the data structures enclosing workqueues. This commit therefore makes all these changes, pushing the specification of the expedited grace-period IPI handler down to the point of use.

Signed-off-by: Paul E. McKenney <[email protected]>
1 parent c46f497 commit 142d106
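In outline: the handler is no longer threaded through function arguments and the rcu_exp_work structure as a function pointer; instead the single name rcu_exp_handler() is referenced directly, and the existing CONFIG_PREEMPT_RCU #ifdef selects which definition gets built. A stand-alone C sketch of this pattern (illustrative only, not kernel code; exp_handler(), drive_before(), drive_after(), and the two structs are made-up names):

#include <stdio.h>

/* Before (sketch): the handler traveled through a stored function
 * pointer, analogous to the removed rew_func field. */
struct exp_work_before {
	void (*func)(void *);
	unsigned long seq;
};

static void exp_handler(void *unused)	/* analogous to rcu_exp_handler() */
{
	printf("expedited IPI handler\n");
}

static void drive_before(struct exp_work_before *w)
{
	w->func(NULL);		/* indirect call through the stored pointer */
}

/* After (sketch): only the sequence number is carried, analogous to
 * rew_s, and the build-time handler is named directly at the call
 * site, much as smp_call_function_single(cpu, rcu_exp_handler, NULL, 0)
 * does in the patch below. */
struct exp_work_after {
	unsigned long seq;
};

static void drive_after(struct exp_work_after *w)
{
	(void)w->seq;
	exp_handler(NULL);	/* direct, build-time-resolved call */
}

int main(void)
{
	struct exp_work_before b = { .func = exp_handler, .seq = 1 };
	struct exp_work_after a = { .seq = 1 };

	drive_before(&b);
	drive_after(&a);
	return 0;
}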

File tree: 4 files changed, 38 additions(+), 37 deletions(-)

Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg

Lines changed: 11 additions & 7 deletions

Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html

Lines changed: 13 additions & 13 deletions
@@ -72,10 +72,10 @@ <h2><a name="RCU-preempt Expedited Grace Periods">
 in quiescent states.
 Otherwise, the expedited grace period will use
 <tt>smp_call_function_single()</tt> to send the CPU an IPI, which
-is handled by <tt>sync_rcu_exp_handler()</tt>.
+is handled by <tt>rcu_exp_handler()</tt>.
 
 <p>
-However, because this is preemptible RCU, <tt>sync_rcu_exp_handler()</tt>
+However, because this is preemptible RCU, <tt>rcu_exp_handler()</tt>
 can check to see if the CPU is currently running in an RCU read-side
 critical section.
 If not, the handler can immediately report a quiescent state.
@@ -145,19 +145,18 @@ <h2><a name="RCU-sched Expedited Grace Periods">
 <p><img src="ExpSchedFlow.svg" alt="ExpSchedFlow.svg" width="55%">
 
 <p>
-As with RCU-preempt's <tt>synchronize_rcu_expedited()</tt>,
+As with RCU-preempt, RCU-sched's
 <tt>synchronize_sched_expedited()</tt> ignores offline and
 idle CPUs, again because they are in remotely detectable
 quiescent states.
-However, the <tt>synchronize_rcu_expedited()</tt> handler
-is <tt>sync_sched_exp_handler()</tt>, and because the
+However, because the
 <tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
 leave no trace of their invocation, in general it is not possible to tell
 whether or not the current CPU is in an RCU read-side critical section.
-The best that <tt>sync_sched_exp_handler()</tt> can do is to check
+The best that RCU-sched's <tt>rcu_exp_handler()</tt> can do is to check
 for idle, on the off-chance that the CPU went idle while the IPI
 was in flight.
-If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
+If the CPU is idle, then <tt>rcu_exp_handler()</tt> reports
 the quiescent state.
 
 <p> Otherwise, the handler forces a future context switch by setting the
@@ -298,19 +297,18 @@ <h3><a name="Idle-CPU Checks">Idle-CPU Checks</a></h3>
 idle CPUs in the mask passed to <tt>rcu_report_exp_cpu_mult()</tt>.
 
 <p>
-For RCU-sched, there is an additional check for idle in the IPI
-handler, <tt>sync_sched_exp_handler()</tt>.
+For RCU-sched, there is an additional check:
 If the IPI has interrupted the idle loop, then
-<tt>sync_sched_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
+<tt>rcu_exp_handler()</tt> invokes <tt>rcu_report_exp_rdp()</tt>
 to report the corresponding quiescent state.
 
 <p>
 For RCU-preempt, there is no specific check for idle in the
-IPI handler (<tt>sync_rcu_exp_handler()</tt>), but because
+IPI handler (<tt>rcu_exp_handler()</tt>), but because
 RCU read-side critical sections are not permitted within the
-idle loop, if <tt>sync_rcu_exp_handler()</tt> sees that the CPU is within
+idle loop, if <tt>rcu_exp_handler()</tt> sees that the CPU is within
 RCU read-side critical section, the CPU cannot possibly be idle.
-Otherwise, <tt>sync_rcu_exp_handler()</tt> invokes
+Otherwise, <tt>rcu_exp_handler()</tt> invokes
 <tt>rcu_report_exp_rdp()</tt> to report the corresponding quiescent
 state, regardless of whether or not that quiescent state was due to
 the CPU being idle.
@@ -625,6 +623,8 @@ <h3><a name="Mid-Boot Operation">Mid-boot operation</a></h3>
 <p>
 With this refinement, synchronous grace periods can now be used from
 task context pretty much any time during the life of the kernel.
+That is, aside from some points in the suspend, hibernate, or shutdown
+code path.
 
 <h3><a name="Summary">
 Summary</a></h3>
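The decision logic this documentation describes for the two build-time variants can be summarized in a small stand-alone sketch (illustrative only; the flags and report_quiescent_state() are hypothetical stand-ins for the kernel's checks and for rcu_report_exp_rdp()):

#include <stdbool.h>
#include <stdio.h>

static bool in_read_side_critical_section;	/* hypothetical: CPU inside rcu_read_lock()..rcu_read_unlock() */
static bool interrupted_idle_loop;		/* hypothetical: the IPI arrived while the CPU was idle */

static void report_quiescent_state(void)	/* stands in for rcu_report_exp_rdp() */
{
	printf("expedited quiescent state reported\n");
}

#ifdef CONFIG_PREEMPT_RCU
/* Preemptible RCU: the handler can tell whether the CPU is in an RCU
 * read-side critical section and, if not, report a quiescent state
 * immediately. */
static void exp_handler_sketch(void)
{
	if (!in_read_side_critical_section)
		report_quiescent_state();
	else
		printf("defer the report until rcu_read_unlock()\n");
}
#else
/* Non-preemptible RCU: rcu_read_lock_sched() and rcu_read_unlock_sched()
 * leave no trace, so the best the handler can do is check for idle;
 * otherwise it forces a future context switch, which will provide the
 * quiescent state. */
static void exp_handler_sketch(void)
{
	if (interrupted_idle_loop)
		report_quiescent_state();
	else
		printf("set need-resched; quiescent state at the next context switch\n");
}
#endif

int main(void)
{
	exp_handler_sketch();
	return 0;
}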

kernel/rcu/tree.h

Lines changed: 0 additions & 1 deletion
@@ -36,7 +36,6 @@
 
 /* Communicate arguments to a workqueue handler. */
 struct rcu_exp_work {
-	smp_call_func_t rew_func;
 	unsigned long rew_s;
 	struct work_struct rew_work;
 };
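For context, here is the marshalling path that remains once rew_func is gone, assembled from the kernel/rcu/tree_exp.h hunks below (abbreviated excerpts, not a complete function):

	/* Queueing side, in _synchronize_rcu_expedited(): only the
	 * expedited-GP sequence number rides in the on-stack work item. */
	rew.rew_s = s;
	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew.rew_work);

	/* Workqueue side, in wait_rcu_exp_gp(): recover the enclosing
	 * rcu_exp_work and pass along just that sequence number. */
	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);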

kernel/rcu/tree_exp.h

Lines changed: 14 additions & 16 deletions
@@ -22,6 +22,8 @@
 
 #include <linux/lockdep.h>
 
+static void rcu_exp_handler(void *unused);
+
 /*
  * Record the start of an expedited grace period.
  */
@@ -344,15 +346,13 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 {
 	int cpu;
 	unsigned long flags;
-	smp_call_func_t func;
 	unsigned long mask_ofl_test;
 	unsigned long mask_ofl_ipi;
 	int ret;
 	struct rcu_exp_work *rewp =
 		container_of(wp, struct rcu_exp_work, rew_work);
 	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
 
-	func = rewp->rew_func;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 
 	/* Each pass checks a CPU for identity, offline, and idle. */
@@ -396,7 +396,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 			mask_ofl_test |= mask;
 			continue;
 		}
-		ret = smp_call_function_single(cpu, func, NULL, 0);
+		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 		if (!ret) {
 			mask_ofl_ipi &= ~mask;
 			continue;
@@ -426,7 +426,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
  * Select the nodes that the upcoming expedited grace period needs
  * to wait for.
  */
-static void sync_rcu_exp_select_cpus(smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(void)
 {
 	int cpu;
 	struct rcu_node *rnp;
@@ -440,7 +440,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
 		rnp->exp_need_flush = false;
 		if (!READ_ONCE(rnp->expmask))
 			continue; /* Avoid early boot non-existent wq. */
-		rnp->rew.rew_func = func;
 		if (!READ_ONCE(rcu_par_gp_wq) ||
 		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
 		    rcu_is_last_leaf_node(rnp)) {
@@ -580,10 +579,10 @@ static void rcu_exp_wait_wake(unsigned long s)
  * Common code to drive an expedited grace period forward, used by
  * workqueues and mid-boot-time tasks.
  */
-static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(unsigned long s)
 {
 	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(func);
+	sync_rcu_exp_select_cpus();
 
 	/* Wait and clean up, including waking everyone. */
 	rcu_exp_wait_wake(s);
@@ -597,14 +596,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
 	struct rcu_exp_work *rewp;
 
 	rewp = container_of(wp, struct rcu_exp_work, rew_work);
-	rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
+	rcu_exp_sel_wait_wake(rewp->rew_s);
 }
 
 /*
  * Given a smp_call_function() handler, kick off the specified
  * implementation of expedited grace period.
  */
-static void _synchronize_rcu_expedited(smp_call_func_t func)
+static void _synchronize_rcu_expedited(void)
 {
 	struct rcu_data *rdp;
 	struct rcu_exp_work rew;
@@ -625,10 +624,9 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
 	/* Ensure that load happens before action based on it. */
 	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
 		/* Direct call during scheduler init and early_initcalls(). */
-		rcu_exp_sel_wait_wake(func, s);
+		rcu_exp_sel_wait_wake(s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
-		rew.rew_func = func;
 		rew.rew_s = s;
 		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
 		queue_work(rcu_gp_wq, &rew.rew_work);
@@ -654,7 +652,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
  * ->expmask fields in the rcu_node tree. Otherwise, immediately
  * report the quiescent state.
  */
-static void sync_rcu_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -760,14 +758,14 @@ void synchronize_rcu_expedited(void)
 
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return;
-	_synchronize_rcu_expedited(sync_rcu_exp_handler);
+	_synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *unused)
+static void rcu_exp_handler(void *unused)
 {
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
@@ -799,7 +797,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
 	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
 		return;
-	ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
+	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
 	WARN_ON_ONCE(ret);
 }
 
@@ -835,7 +833,7 @@ void synchronize_rcu_expedited(void)
 	if (rcu_blocking_is_gp())
 		return;
 
-	_synchronize_rcu_expedited(sync_sched_exp_handler);
+	_synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
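Callers of the exported interface are unaffected by this commit; a typical (hypothetical) use still looks like the sketch below, where struct foo, foo_lock, and remove_and_free() are made-up example names:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
};

static DEFINE_SPINLOCK(foo_lock);

/* Unlink an element from an RCU-protected list, wait for an expedited
 * grace period so that no pre-existing reader can still hold a
 * reference, then free the element. */
static void remove_and_free(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);
	spin_unlock(&foo_lock);

	synchronize_rcu_expedited();	/* same call before and after this commit */
	kfree(p);
}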