@@ -53,16 +53,8 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -165,120 +157,10 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
-
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
- */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-	void (*call_rcu_func)(struct rcu_head *head,
-			      void (*func)(struct rcu_head *head));
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu_func = type;
-	call_rcu_func(head, rcu_barrier_callback);
-}
-
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-	smp_mb(); /* In case we didn't sleep. */
-}
-
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
-					       void (*func)(struct rcu_head *head)))
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count. Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(call_rcu);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-	_rcu_barrier(call_rcu_bh);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(call_rcu_sched);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
-	rcu_cpu_notify(self, action, hcpu);
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_DOWN_PREPARE) {
-		/* Don't need to wait until next removal operation. */
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
+	return rcu_cpu_notify(self, action, hcpu);
 }
 
 void __init rcu_init(void)