|
@@ -228,6 +228,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
|
|
|
|
|
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
|
|
|
|
|
+#ifdef CONFIG_PROVE_RCU
|
|
|
+extern int rcu_is_cpu_idle(void);
|
|
|
+#else /* !CONFIG_PROVE_RCU */
|
|
|
+static inline int rcu_is_cpu_idle(void)
|
|
|
+{
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+#endif /* else !CONFIG_PROVE_RCU */
|
|
|
+
|
|
|
extern struct lockdep_map rcu_lock_map;
|
|
|
# define rcu_read_acquire() \
|
|
|
lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
|
|
@@ -262,6 +271,8 @@ static inline int rcu_read_lock_held(void)
|
|
|
{
|
|
|
if (!debug_lockdep_rcu_enabled())
|
|
|
return 1;
|
|
|
+ if (rcu_is_cpu_idle())
|
|
|
+ return 0;
|
|
|
return lock_is_held(&rcu_lock_map);
|
|
|
}
|
|
|
|
|
@@ -285,6 +296,19 @@ extern int rcu_read_lock_bh_held(void);
|
|
|
*
|
|
|
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
|
|
|
* and while lockdep is disabled.
|
|
|
+ *
|
|
|
+ * Note that if the CPU is in the idle loop from an RCU point of
|
|
|
+ * view (i.e., that we are in the section between rcu_idle_enter() and
|
|
|
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
|
|
|
+ * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
|
|
|
+ * that are in such a section, considering these as in extended quiescent
|
|
|
+ * state, so such a CPU is effectively never in an RCU read-side critical
|
|
|
+ * section regardless of what RCU primitives it invokes. This state of
|
|
|
+ * affairs is required --- we need to keep an RCU-free window in idle
|
|
|
+ * where the CPU may possibly enter into low power mode. This way we can
|
|
|
+ * report an extended quiescent state to other CPUs that started a grace
|
|
|
+ * period. Otherwise we would delay any grace period as long as we run in
|
|
|
+ * the idle task.
|
|
|
*/
|
|
|
#ifdef CONFIG_PREEMPT_COUNT
|
|
|
static inline int rcu_read_lock_sched_held(void)
|
|
@@ -293,6 +317,8 @@ static inline int rcu_read_lock_sched_held(void)
|
|
|
|
|
|
if (!debug_lockdep_rcu_enabled())
|
|
|
return 1;
|
|
|
+ if (rcu_is_cpu_idle())
|
|
|
+ return 0;
|
|
|
if (debug_locks)
|
|
|
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
|
|
|
return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
|