
sched: Replace rq->bkl_count with rq->rq_sched_info.bkl_count

Since rq->rq_sched_info.bkl_count is not used for rq, fold rq->bkl_count
into it. This saves some space in struct rq.

Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1294991859-13246-1-git-send-email-yong.zhang0@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit fce2097983
Author: Yong Zhang <yong.zhang0@gmail.com>
 kernel/sched.c       | 5 +----
 kernel/sched_debug.c | 4 +++-
 2 files changed, 4 insertions(+), 5 deletions(-)
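
For context, a rough sketch of the two structures involved (fields abbreviated and approximated for this kernel generation, not verbatim source):

struct sched_info {
	/* ... run/wait accounting counters ... */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats: only ever bumped through a task's sched_info */
	unsigned int bkl_count;
#endif
};

struct rq {
	/* ... */
#ifdef CONFIG_SCHEDSTATS
	/* per-rq sched_info; its bkl_count slot was never updated before this patch */
	struct sched_info rq_sched_info;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

	/* BKL stats: the separate counter this patch removes */
	unsigned int bkl_count;
#endif
};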

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -553,9 +553,6 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
@@ -3887,7 +3884,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
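
The rename is transparent to schedstat_inc() because the macro simply pastes the field expression onto the runqueue pointer. A sketch of its shape (along the lines of kernel/sched_stats.h in this era, not the verbatim definition):

#ifdef CONFIG_SCHEDSTATS
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
#else
# define schedstat_inc(rq, field)	do { } while (0)
#endif

So schedstat_inc(this_rq(), rq_sched_info.bkl_count) expands to this_rq()->rq_sched_info.bkl_count++, mirroring the existing per-task schedstat_inc(prev, sched_info.bkl_count).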

--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c

@@ -296,9 +296,11 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(ttwu_count);
 	P(ttwu_local);
 
-	P(bkl_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+				rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
 	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
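
P(bkl_count) is open-coded rather than turned into P(rq_sched_info.bkl_count) because the local P() macro stringifies its argument for the printed label. A sketch of its shape in print_cpu() (approximate, not the verbatim definition):

#define P(n)	SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);

Passing the nested field would have changed the label to "rq_sched_info.bkl_count"; the explicit SEQ_printf keeps the familiar "bkl_count" name in the scheduler debug output. The added #undef P64 pairs with the P64() helper defined alongside P() earlier in the same function, which had been missing its undef.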