@@ -2228,10 +2228,28 @@ static inline int rcu_blocking_is_gp(void)
  * rcu_read_lock_sched().
  *
  * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
+ * non-threaded hardware-interrupt handlers, in progress on entry will
+ * have completed before this primitive returns.  However, this does not
+ * guarantee that softirq handlers will have completed, since in some
+ * kernels, these handlers can run in process context, and can block.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_sched() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-sched read-side critical section whose beginning
+ * preceded the call to synchronize_sched().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_sched() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_sched() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_sched(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
  *
  * This primitive provides the guarantees made by the (now removed)
  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
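
A minimal sketch of the update-side pattern that relies on the guarantee
documented in the hunk above; the structure foo, the global pointer gp, and
the lock gp_lock are hypothetical names used only for illustration:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;		/* hypothetical RCU-sched-protected pointer */
static DEFINE_SPINLOCK(gp_lock);	/* hypothetical update-side lock */

/* Reader: rcu_read_lock_sched() disables preemption across the access. */
static int reader(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock_sched();
	return ret;
}

/* Updater: unpublish the old structure, wait for readers, then free it. */
static void updater(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);

	/*
	 * Once synchronize_sched() returns, every pre-existing RCU-sched
	 * read-side critical section has completed and each CPU has
	 * executed the full memory barriers described above, so no reader
	 * can still hold a reference to oldp.
	 */
	synchronize_sched();
	kfree(oldp);
}

Because synchronize_sched() implies the full memory barriers documented
above, the updater needs no additional smp_mb() between unpublishing gp and
freeing oldp.
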
@@ -2259,6 +2277,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  * read-side critical sections have completed.  RCU read-side critical
  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  * and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
  */
 void synchronize_rcu_bh(void)
 {
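
Similarly, a minimal sketch of an RCU-bh reader/updater pair; the list
bh_list and struct bar are hypothetical names used only for illustration:

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	struct list_head list;
	int data;
};

static LIST_HEAD(bh_list);		/* hypothetical RCU-bh-protected list */

/* Reader: may run in softirq context; rcu_read_lock_bh() marks the RCU-bh
 * read-side critical section. */
static void bh_reader(void)
{
	struct bar *b;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(b, &bh_list, list)
		pr_info("bar data: %d\n", b->data);
	rcu_read_unlock_bh();
}

/* Updater: remove an element, wait for pre-existing RCU-bh readers, free it.
 * Mutual exclusion among updaters is assumed to be provided by the caller. */
static void bh_remove(struct bar *b)
{
	list_del_rcu(&b->list);
	synchronize_rcu_bh();
	kfree(b);
}
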