@@ -284,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -391,11 +391,11 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (--t->rcu_read_lock_nesting == 0) {
+		barrier();  /* decr before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+	}
 #ifdef CONFIG_PROVE_LOCKING
 	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
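The second hunk streamlines the common case: a nested rcu_read_unlock() now does nothing but decrement ->rcu_read_lock_nesting, and only the outermost unlock pays for the barrier() and the ->rcu_read_unlock_special load. The noinline added in the first hunk presumably keeps that cold slow path from being inlined into the hot __rcu_read_unlock() path. Below is a minimal user-space sketch of the same shape; the struct, the slow-path body, and main() are placeholders for illustration only, and the kernel's barrier()/ACCESS_ONCE() are deliberately elided since plain user-space C has no direct equivalent:

#include <stdio.h>

/* Stand-in for the real task_struct; only the two fields the diff
 * touches are mirrored here. */
struct task_struct {
	int rcu_read_lock_nesting;	/* read-side nesting depth */
	int rcu_read_unlock_special;	/* nonzero => slow path needed */
};

/* Kept out of line, mirroring the noinline added by the diff. */
static __attribute__((noinline)) void
rcu_read_unlock_special(struct task_struct *t)
{
	t->rcu_read_unlock_special = 0;	/* placeholder slow-path work */
}

static void rcu_read_unlock_sketch(struct task_struct *t)
{
	/* Nested unlocks take only the decrement-and-branch fast path;
	 * the special flag is consulted solely at the outermost unlock. */
	if (--t->rcu_read_lock_nesting == 0) {
		if (t->rcu_read_unlock_special)
			rcu_read_unlock_special(t);
	}
}

int main(void)
{
	struct task_struct t = {
		.rcu_read_lock_nesting = 2,
		.rcu_read_unlock_special = 1,
	};

	rcu_read_unlock_sketch(&t);	/* nested: fast path only */
	rcu_read_unlock_sketch(&t);	/* outermost: runs slow path */
	printf("special = %d\n", t.rcu_read_unlock_special);	/* prints 0 */
	return 0;
}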