@@ -132,6 +132,7 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -146,6 +147,11 @@ static int rcu_cpu_blocking_cur_gp(void)
 /*
  * Check for a running RCU reader. Because there is only one CPU,
  * there can be but one running RCU reader at a time. ;-)
+ *
+ * Returns zero if there are no running readers. Returns a positive
+ * number if there is at least one reader within its RCU read-side
+ * critical section. Returns a negative number if an outermost reader
+ * is in the midst of exiting from its RCU read-side critical section.
  */
 static int rcu_preempt_running_reader(void)
 {
@@ -475,7 +486,7 @@ void rcu_preempt_note_context_switch(void)
 	unsigned long flags;
 
 	local_irq_save(flags); /* must exclude scheduler_tick(). */
-	if (rcu_preempt_running_reader() &&
+	if (rcu_preempt_running_reader() > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -494,6 +505,13 @@ void rcu_preempt_note_context_switch(void)
 		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
 		if (rcu_cpu_blocking_cur_gp())
 			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+	} else if (rcu_preempt_running_reader() < 0 &&
+		   t->rcu_read_unlock_special) {
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -618,13 +636,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	--t->rcu_read_lock_nesting;
-	barrier(); /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier(); /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier(); /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -649,7 +676,7 @@ static void rcu_preempt_check_callbacks(void)
 		invoke_rcu_callbacks();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
-	    rcu_preempt_running_reader())
+	    rcu_preempt_running_reader() > 0)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 