@@ -2950,6 +2950,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
+/*
+ * During early boot, any blocking grace-period wait automatically
+ * implies a grace period.  Later on, this is never the case for PREEMPT.
+ *
+ * However, because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point during execution of either
+ * synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
+ */
+static int rcu_blocking_is_gp(void)
+{
+	int ret;
+
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
+	might_sleep();  /* Check for RCU read-side critical section. */
+	preempt_disable();
+	ret = num_online_cpus() <= 1;
+	preempt_enable();
+	return ret;
+}
+
+/**
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting.  RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ * In addition, regions of code across which interrupts, preemption, or
+ * softirqs have been disabled also serve as RCU read-side critical
+ * sections.  This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
+ * preceded the call to synchronize_rcu().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_rcu() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_rcu() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ */
+void synchronize_rcu(void)
+{
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_rcu() in RCU read-side critical section");
+	if (rcu_blocking_is_gp())
+		return;
+	if (rcu_gp_is_expedited())
+		synchronize_rcu_expedited();
+	else
+		wait_rcu_gp(call_rcu);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
  *
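
To make the kernel-doc above concrete, the following is a minimal sketch of the publish/wait/reclaim pattern that synchronize_rcu() supports. The struct foo, gbl_foo, foo_lock, and the two helpers are hypothetical illustrations, not part of this commit:

struct foo {
	int a;
};

static struct foo __rcu *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

/* Reader: runs concurrently with the updater and never blocks it. */
static int foo_get_a(void)
{
	int ret;

	rcu_read_lock();
	ret = rcu_dereference(gbl_foo)->a;
	rcu_read_unlock();
	return ret;
}

/* Updater: publish a new version, wait out pre-existing readers, free the old one. */
static void foo_update_a(int new_a)
{
	struct foo *new_fp;
	struct foo *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return;
	new_fp->a = new_a;

	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&foo_lock);

	synchronize_rcu();	/* Wait for pre-existing readers to finish. */
	kfree(old_fp);		/* No reader can still hold a reference to old_fp. */
}

The memory-ordering guarantees quoted in the kernel-doc are what make the final kfree() safe: any reader that could have obtained a pointer to old_fp must have exited its critical section before synchronize_rcu() returned.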
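
A note on the RCU_LOCKDEP_WARN() in synchronize_rcu() above: waiting for a grace period from within a read-side critical section is a self-deadlock, because the grace period cannot end until that critical section does. A deliberately broken sketch of the pattern the check catches (hypothetical code; with CONFIG_PROVE_RCU, lockdep emits the "Illegal synchronize_rcu() in RCU read-side critical section" splat):

static void broken_updater(void)
{
	rcu_read_lock();
	/*
	 * BUG: the grace period cannot complete until this reader
	 * exits, yet the reader is now blocked waiting for the
	 * grace period.
	 */
	synchronize_rcu();
	rcu_read_unlock();
}

In !PREEMPT kernels the might_sleep() in rcu_blocking_is_gp() plays a similar debugging role, since an RCU reader must not sleep there; that is what its "Check for RCU read-side critical section" comment refers to.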