
Commit e5bc3af

Authored and committed by Paul E. McKenney
rcu: Consolidate PREEMPT and !PREEMPT synchronize_rcu()
Now that rcu_blocking_is_gp() makes the correct immediate-return decision for both PREEMPT and !PREEMPT, a single implementation of synchronize_rcu() will work correctly under both configurations. This commit therefore eliminates a few lines of code by consolidating the two implementations of synchronize_rcu().

Signed-off-by: Paul E. McKenney <[email protected]>
Parent: 3cd4ca4 · Commit: e5bc3af
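The heart of the change is rcu_blocking_is_gp(), whose single decision now serves both configurations. Below is a simplified sketch of that decision, distilled from the tree.c hunk that follows; the committed version additionally runs might_sleep() and disables preemption around the CPU count, and the _sketch suffix is ours:

/* Sketch only: one early-return check now covers both configurations. */
static int rcu_blocking_is_gp_sketch(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT))
		/* PREEMPT: blocking implies a grace period only before
		 * the scheduler starts. */
		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
	/*
	 * !PREEMPT: a context switch is a grace period, so a single online
	 * CPU suffices.  This also subsumes early boot, when only the boot
	 * CPU is online, which is why the old explicit RCU_SCHEDULER_INACTIVE
	 * test could be dropped from this path.
	 */
	return num_online_cpus() <= 1;
}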

File tree

3 files changed (+73, -91 lines)


kernel/rcu/tree.c

Lines changed: 73 additions & 0 deletions
@@ -2950,6 +2950,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);

+/*
+ * During early boot, any blocking grace-period wait automatically
+ * implies a grace period.  Later on, this is never the case for PREEMPT.
+ *
+ * However, because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point in time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
+ */
+static int rcu_blocking_is_gp(void)
+{
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
+	might_sleep();  /* Check for RCU read-side critical section. */
+	preempt_disable();
+	ret = num_online_cpus() <= 1;
+	preempt_enable();
+	return ret;
+}
+
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting.  RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ * In addition, regions of code across which interrupts, preemption, or
+ * softirqs have been disabled also serve as RCU read-side critical
+ * sections.  This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
+ * preceded the call to synchronize_rcu().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_rcu() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_rcu() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ */
+void synchronize_rcu(void)
+{
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_rcu() in RCU read-side critical section");
+	if (rcu_blocking_is_gp())
+		return;
+	if (rcu_gp_is_expedited())
+		synchronize_rcu_expedited();
+	else
+		wait_rcu_gp(call_rcu);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
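For context on the guarantee documented in the new header comment, the canonical reader/updater pattern that synchronize_rcu() supports is sketched below. This is standard RCU usage, not part of this commit; struct foo, foo_list, foo_lock, find_foo(), and remove_foo() are hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* Serializes updaters only. */

/* Reader: traverse the list under rcu_read_lock() protection. */
static int find_foo(int key)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &foo_list, list)
		if (p->data == key) {
			ret = 1;
			break;
		}
	rcu_read_unlock();
	return ret;
}

/* Updater: unlink an element, wait out pre-existing readers, then free it. */
static void remove_foo(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);	/* Pre-existing readers may still see p. */
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* Wait for those readers to finish. */
	kfree(p);		/* Now no reader can be referencing p. */
}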

kernel/rcu/tree_exp.h

Lines changed: 0 additions & 27 deletions
@@ -643,33 +643,6 @@ static void _synchronize_rcu_expedited(void)
 	mutex_unlock(&rcu_state.exp_mutex);
 }

-/*
- * During early boot, any blocking grace-period wait automatically
- * implies a grace period.  Later on, this is never the case for PREEMPT.
- *
- * However, because a context switch is a grace period for !PREEMPT, any
- * blocking grace-period wait automatically implies a grace period if
- * there is only one CPU online at any point in time during execution of
- * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds some
- * overhead: RCU still operates correctly.
- */
-static int rcu_blocking_is_gp(void)
-{
-	int ret;
-
-	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
-		return true;
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		return false;
-	might_sleep();  /* Check for RCU read-side critical section. */
-	preempt_disable();
-	ret = num_online_cpus() <= 1;
-	preempt_enable();
-	return ret;
-}
-
 #ifdef CONFIG_PREEMPT_RCU

 /*

kernel/rcu/tree_plugin.h

Lines changed: 0 additions & 64 deletions
@@ -825,54 +825,6 @@ static void rcu_flavor_check_callbacks(int user)
 		t->rcu_read_unlock_special.b.need_qs = true;
 }

-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed.  Note, however, that
- * upon return from synchronize_rcu(), the caller might well be executing
- * concurrently with new RCU read-side critical sections that began while
- * synchronize_rcu() was waiting.  RCU read-side critical sections are
- * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
- * In addition, regions of code across which interrupts, preemption, or
- * softirqs have been disabled also serve as RCU read-side critical
- * sections.  This includes hardware interrupt handlers, softirq handlers,
- * and NMI handlers.
- *
- * Note that this guarantee implies further memory-ordering guarantees.
- * On systems with more than one CPU, when synchronize_rcu() returns,
- * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last RCU read-side critical section whose beginning
- * preceded the call to synchronize_rcu().  In addition, each CPU having
- * an RCU read-side critical section that extends beyond the return from
- * synchronize_rcu() is guaranteed to have executed a full memory barrier
- * after the beginning of synchronize_rcu() and before the beginning of
- * that RCU read-side critical section.  Note that these guarantees include
- * CPUs that are offline, idle, or executing in user mode, as well as CPUs
- * that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_rcu(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
- * again only if the system has more than one CPU).
- */
-void synchronize_rcu(void)
-{
-	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
-			 lock_is_held(&rcu_lock_map) ||
-			 lock_is_held(&rcu_sched_lock_map),
-			 "Illegal synchronize_rcu() in RCU read-side critical section");
-	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
-		return;
-	if (rcu_gp_is_expedited())
-		synchronize_rcu_expedited();
-	else
-		wait_rcu_gp(call_rcu);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
 /*
  * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so.  No need to issue warnings,
@@ -1115,22 +1067,6 @@ static void rcu_flavor_check_callbacks(int user)
 	}
 }

-/* PREEMPT=n implementation of synchronize_rcu(). */
-void synchronize_rcu(void)
-{
-	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
-			 lock_is_held(&rcu_lock_map) ||
-			 lock_is_held(&rcu_sched_lock_map),
-			 "Illegal synchronize_rcu() in RCU read-side critical section");
-	if (rcu_blocking_is_gp())
-		return;
-	if (rcu_gp_is_expedited())
-		synchronize_rcu_expedited();
-	else
-		wait_rcu_gp(call_rcu);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
 /*
  * Because preemptible RCU does not exist, tasks cannot possibly exit
  * while in preemptible RCU read-side critical sections.
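One branch worth noting in the consolidated synchronize_rcu() is rcu_gp_is_expedited(). A caller that temporarily needs low-latency grace periods can bias that branch with the existing rcu_expedite_gp()/rcu_unexpedite_gp() pair (or system-wide via the rcupdate.rcu_expedited boot parameter); the function below is a hypothetical illustration, not kernel code:

#include <linux/rcupdate.h>

/* Hypothetical teardown path that prefers expedited grace periods. */
static void fast_teardown(void)
{
	rcu_expedite_gp();	/* Makes rcu_gp_is_expedited() return true. */
	synchronize_rcu();	/* Takes the synchronize_rcu_expedited() branch. */
	rcu_unexpedite_gp();	/* Restore normal grace-period behavior. */
}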
