@@ -44,7 +44,6 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-int rcu_scheduler_active __read_mostly;
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -66,104 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
-
-#ifndef CONFIG_TINY_RCU
-
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-/**
- * synchronize_rcu - wait until a grace period has elapsed.
- *
- * Control will return to the caller some time after a full grace
- * period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-void synchronize_rcu(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (!rcu_scheduler_active)
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu);
-
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed. These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API. In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-void synchronize_sched(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_sched(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- */
-void synchronize_rcu_bh(void)
-{
-	struct rcu_synchronize rcu;
-
-	if (rcu_blocking_is_gp())
-		return;
-
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_bh(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-#endif /* #ifndef CONFIG_TINY_RCU */
-
-void rcu_scheduler_starting(void)
-{
-	WARN_ON(num_online_cpus() != 1);
-	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
-}