@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	if (t->rcu_read_lock_nesting &&
+	if (t->rcu_read_lock_nesting > 0 &&
 	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
 				rnp->gp_tasks = &t->rcu_node_entry;
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	} else if (t->rcu_read_lock_nesting < 0 &&
+		   t->rcu_read_unlock_special) {
+
+		/*
+		 * Complete exit from RCU read-side critical section on
+		 * behalf of preempted instance of __rcu_read_unlock().
+		 */
+		rcu_read_unlock_special(t);
 	}
 
 	/*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	}
 
 	/* Hardware IRQ handlers cannot block. */
-	if (in_irq()) {
+	if (in_irq() || in_serving_softirq()) {
 		local_irq_restore(flags);
 		return;
 	}
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
+		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+		if (t->rcu_boosted) {
+			special |= RCU_READ_UNLOCK_BOOSTED;
+			t->rcu_boosted = 0;
+		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 		t->rcu_blocked_node = NULL;
 
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
 		if (special & RCU_READ_UNLOCK_BOOSTED) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
 			rt_mutex_unlock(t->rcu_boost_mutex);
 			t->rcu_boost_mutex = NULL;
 		}
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
 	struct task_struct *t = current;
 
 	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-	--t->rcu_read_lock_nesting;
-	barrier();  /* decrement before load of ->rcu_read_unlock_special */
-	if (t->rcu_read_lock_nesting == 0 &&
-	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-		rcu_read_unlock_special(t);
+	if (t->rcu_read_lock_nesting != 1)
+		--t->rcu_read_lock_nesting;
+	else {
+		t->rcu_read_lock_nesting = INT_MIN;
+		barrier();  /* assign before ->rcu_read_unlock_special load */
+		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+			rcu_read_unlock_special(t);
+		barrier();  /* ->rcu_read_unlock_special load before assign */
+		t->rcu_read_lock_nesting = 0;
+	}
 #ifdef CONFIG_PROVE_LOCKING
-	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+	{
+		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+	}
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		rcu_preempt_qs(cpu);
 		return;
 	}
-	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+	if (t->rcu_read_lock_nesting > 0 &&
+	    per_cpu(rcu_preempt_data, cpu).qs_pending)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp))
+		if (!sync_rcu_preempt_exp_done(rnp)) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			break;
+		}
 		if (rnp->parent == NULL) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 		rnp->expmask &= ~mask;
 	}
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+	t->rcu_boosted = 1;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
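
For reference, the heart of the change is the window in __rcu_read_unlock() between the final decrement of ->rcu_read_lock_nesting and the end of special processing: an irq handler that uses the scheduler (and therefore RCU) during that window must not conclude that it is the outermost reader. Parking the count at INT_MIN makes it look deeply negative to any such nested reader, and rcu_preempt_note_context_switch() finishes the unlock if the task is preempted mid-window. Below is a minimal userspace sketch of that counter protocol only; it is not part of the patch, and struct task, irq_handler(), read_unlock() and unlock_slow_path() are illustrative stand-ins assuming a single task and one interrupt at the vulnerable point.

/*
 * Userspace sketch (not kernel code) of the ->rcu_read_lock_nesting
 * protocol that this patch gives __rcu_read_unlock().  The "interrupt"
 * is simulated by calling irq_handler() at the vulnerable point.
 */
#include <limits.h>
#include <stdio.h>

struct task {
	int rcu_read_lock_nesting;
	int rcu_read_unlock_special;	/* nonzero: unlock needs slow path */
};

static struct task t;

static void unlock_slow_path(const char *who)
{
	/* Stand-in for rcu_read_unlock_special(): must run at most once. */
	printf("%s: slow path\n", who);
	t.rcu_read_unlock_special = 0;
}

static void irq_handler(void)
{
	/* A scheduler-using irq handler doing its own read-side pair. */
	t.rcu_read_lock_nesting++;		/* rcu_read_lock() */
	t.rcu_read_lock_nesting--;		/* rcu_read_unlock() */
	/*
	 * Nesting is now a large negative value, not 0, so the handler
	 * does not mistake itself for the outermost reader and does not
	 * re-enter the slow path.  With the old code it would read 0 here.
	 */
	if (t.rcu_read_lock_nesting == 0 && t.rcu_read_unlock_special)
		unlock_slow_path("irq");
}

static void read_unlock(void)			/* models __rcu_read_unlock() */
{
	if (t.rcu_read_lock_nesting != 1) {
		--t.rcu_read_lock_nesting;	/* not outermost: just count down */
		return;
	}
	t.rcu_read_lock_nesting = INT_MIN;	/* park at "deeply negative" */
	irq_handler();				/* interrupt hits the window */
	if (t.rcu_read_unlock_special)
		unlock_slow_path("task");
	t.rcu_read_lock_nesting = 0;
}

int main(void)
{
	t.rcu_read_lock_nesting = 1;		/* outermost reader */
	t.rcu_read_unlock_special = 1;		/* e.g. reader was preempted */
	read_unlock();
	printf("nesting=%d special=%d\n",
	       t.rcu_read_lock_nesting, t.rcu_read_unlock_special);
	return 0;
}

Running the sketch executes the slow path exactly once, from the task, which is the invariant the new lockdep check (rrln < 0 && rrln > INT_MIN / 2) guards: a genuinely underflowed counter is still caught, while the intentionally parked INT_MIN value is tolerated.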