@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock() used illegally while idle");
 }
 
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
 }
 
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_sched() used illegally while idle");
 }
 
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
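
Note that the rename also inverts the predicate's sense: the old rcu_is_cpu_idle() returned true when RCU was ignoring the current CPU, while the new rcu_is_watching() returns true when RCU is paying attention to it, which is why every call site above gains or drops a "!". A minimal sketch of how a caller might use the new predicate (the function my_handler() and its role as an idle-time callback are hypothetical, for illustration only):

	/*
	 * Code that can run while RCU regards the CPU as idle (for
	 * example, a callback invoked from the idle loop) must not
	 * enter an RCU read-side critical section unless RCU is
	 * watching, because grace-period detection ignores idle CPUs.
	 */
	static void my_handler(void)
	{
		if (!rcu_is_watching())
			return;	/* RCU is ignoring this CPU; bail out. */

		rcu_read_lock();
		/* ... safely dereference RCU-protected data here ... */
		rcu_read_unlock();
	}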