@@ -40,10 +40,10 @@
 static struct task_struct *rcu_kthread_task;
 static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
 static unsigned long have_rcu_kthread_work;
-static void invoke_rcu_kthread(void);
 
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
+static void invoke_rcu_kthread(void);
 static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
 static int rcu_kthread(void *arg);
 static void __call_rcu(struct rcu_head *head,
@@ -79,26 +79,31 @@ void rcu_exit_nohz(void)
 #endif /* #ifdef CONFIG_NO_HZ */
 
 /*
- * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
- * Also disable irqs to avoid confusion due to interrupt handlers
+ * Helper function for rcu_sched_qs() and rcu_bh_qs().
+ * Also irqs are disabled to avoid confusion due to interrupt handlers
  * invoking call_rcu().
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	if (rcp->rcucblist != NULL &&
 	    rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
-		local_irq_restore(flags);
 		return 1;
 	}
-	local_irq_restore(flags);
 
 	return 0;
 }
 
+/*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_kthread(void)
+{
+	have_rcu_kthread_work = 1;
+	wake_up(&rcu_kthread_wq);
+}
+
 /*
  * Record an rcu quiescent state. And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
@@ -106,9 +111,13 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  */
 void rcu_sched_qs(int cpu)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
 		invoke_rcu_kthread();
+	local_irq_restore(flags);
 }
 
 /*
@@ -116,8 +125,12 @@ void rcu_sched_qs(int cpu)
  */
 void rcu_bh_qs(int cpu)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
 		invoke_rcu_kthread();
+	local_irq_restore(flags);
 }
 
 /*
@@ -207,20 +220,6 @@ static int rcu_kthread(void *arg)
 	return 0; /* Not reached, but needed to shut gcc up. */
 }
 
-/*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-	local_irq_restore(flags);
-}
-
 /*
  * Wait for a grace period to elapse. But it is illegal to invoke
  * synchronize_sched() from within an RCU read-side critical section.
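
The net effect of the hunks above is to hoist the irq disabling out of rcu_qsctr_help()
and invoke_rcu_kthread() and into rcu_sched_qs() and rcu_bh_qs(), so reporting a
quiescent state saves and restores irqs once rather than once per helper call. Below is
a minimal userspace sketch of that caller-protects pattern; it is an illustration only,
not part of the patch and not kernel code, and every name in it (help_old(), help_new(),
fake_irq_disable(), struct ctrlblk) is made up for the example.

#include <stdio.h>

static int irq_toggles;	/* counts simulated irq-disable/enable pairs */

static void fake_irq_disable(void) { irq_toggles++; }
static void fake_irq_enable(void)  { }

struct ctrlblk { int donetail, curtail; };

/* Old pattern: the helper protects itself, so N calls pay N toggles. */
static int help_old(struct ctrlblk *cb)
{
	int ret = 0;

	fake_irq_disable();
	if (cb->donetail != cb->curtail) {
		cb->donetail = cb->curtail;
		ret = 1;
	}
	fake_irq_enable();
	return ret;
}

/* New pattern: the helper assumes its caller has already disabled irqs. */
static int help_new(struct ctrlblk *cb)
{
	if (cb->donetail != cb->curtail) {
		cb->donetail = cb->curtail;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct ctrlblk sched = { 0, 1 }, bh = { 0, 1 };

	irq_toggles = 0;
	help_old(&sched);			/* one toggle */
	help_old(&bh);				/* another toggle */
	printf("old pattern: %d toggles\n", irq_toggles);

	sched.donetail = bh.donetail = 0;	/* reset for the second run */
	irq_toggles = 0;
	fake_irq_disable();			/* single toggle around both calls */
	help_new(&sched);
	help_new(&bh);
	fake_irq_enable();
	printf("new pattern: %d toggles\n", irq_toggles);
	return 0;
}

Compiled standalone this prints 2 toggles for the old pattern and 1 for the new one,
which mirrors why the patch moves local_irq_save()/local_irq_restore() into the two
callers.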