sched: Fix raciness in runqueue_is_locked()

runqueue_is_locked() is unavoidably racy due to a poor interface design.
It does

	cpu = get_cpu();
	ret = some_percpu_thing(cpu);
	put_cpu();
	return ret;

Its return value is unreliable: preemption is re-enabled before the
caller ever sees the result, so the task may have migrated and the lock
state may have changed underneath it.

Fix this by making the caller pass in the CPU of interest, so the
caller can hold get_cpu()/put_cpu() across both the check and the
action the check is guarding.
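
The pattern the callers move to instead looks like this (illustrative
pseudocode in the same spirit as above; do_the_work() is a stand-in,
the concrete caller is the kernel/trace/trace.c hunk below):

	cpu = get_cpu();		/* preemption off: cpu is stable */
	ret = some_percpu_thing(cpu);
	if (ret)
		do_the_work();		/* result is only meaningful here */
	put_cpu();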

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <200909191855.n8JItiko022148@imap1.linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 89f19f04dc
---
 include/linux/sched.h |  2 +-
 kernel/sched.c        | 10 ++--------
 kernel/trace/trace.c  |  8 +++++++-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -257,7 +257,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(void);
+extern int runqueue_is_locked(int cpu);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
 extern cpumask_var_t nohz_cpu_mask;

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
-	int ret;
-
-	ret = spin_is_locked(&rq->lock);
-	put_cpu();
-	return ret;
+	return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
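
For reference, runqueue_is_locked() as it reads once this hunk is
applied; the answer is only as stable as the caller makes it, which is
why the kernel/trace/trace.c caller below pins the CPU with
get_cpu()/put_cpu() around the check:

	int runqueue_is_locked(int cpu)
	{
		/* true if this CPU's runqueue lock is held right now */
		return spin_is_locked(&cpu_rq(cpu)->lock);
	}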

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -275,12 +275,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)
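
And trace_wake_up() as it reads with the hunk applied, assembled from
the context and '+' lines above; the TRACE_ITER_BLOCK test moves to the
top so the get_cpu()/put_cpu() section is skipped entirely when
blocking is requested:

	void trace_wake_up(void)
	{
		int cpu;

		if (trace_flags & TRACE_ITER_BLOCK)
			return;
		/*
		 * The runqueue_is_locked() can fail, but this is the best we
		 * have for now:
		 */
		cpu = get_cpu();
		if (!runqueue_is_locked(cpu))
			wake_up(&trace_wait);
		put_cpu();
	}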