Browse Source

sched: use constants if !CONFIG_SCHED_DEBUG

use constants if !CONFIG_SCHED_DEBUG.

this speeds up the code and reduces code-size:

    text    data     bss     dec     hex filename
   27464    3014      16   30494    771e sched.o.before
   26929    3010      20   29959    7507 sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar 17 năm trước
mục cha
commit
2bd8e6d422
3 tập tin đã thay đổi với 24 bổ sung và 15 xóa
  1. 4 1
      include/linux/sched.h
  2. 0 6
      kernel/sched.c
  3. 20 8
      kernel/sched_fair.c

+ 4 - 1
include/linux/sched.h

@@ -1402,15 +1402,18 @@ static inline void idle_task_exit(void) {}
 
 extern void sched_idle_next(void);
 
+#ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_stat_granularity;
 extern unsigned int sysctl_sched_runtime_limit;
-extern unsigned int sysctl_sched_compat_yield;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
+#endif
+
+extern unsigned int sysctl_sched_compat_yield;
 
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);

+ 0 - 6
kernel/sched.c

@@ -1658,12 +1658,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	put_cpu();
 }
 
-/*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
- */
-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
-
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
  *

+ 20 - 8
kernel/sched_fair.c

@@ -20,6 +20,15 @@
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  */
 
+/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 20ms, units: nanoseconds)
@@ -34,7 +43,13 @@
  * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
  * Targeted preemption latency for CPU-bound tasks:
  */
-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
+const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
@@ -58,7 +73,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -68,13 +83,10 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 
-unsigned int sysctl_sched_stat_granularity __read_mostly;
+const_debug unsigned int sysctl_sched_stat_granularity;
 
-/*
- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
- */
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
 /*
@@ -89,7 +101,7 @@ enum {
 	SCHED_FEAT_SKIP_INITIAL		= 32,
 };
 
-unsigned int sysctl_sched_features __read_mostly =
+const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_FAIR_SLEEPERS	*1 |
 		SCHED_FEAT_SLEEPER_AVG		*0 |
 		SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |