@@ -668,6 +668,8 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
  */
 unsigned int sysctl_sched_rt_period = 1000000;
 
+static __read_mostly int scheduler_running;
+
 /*
  * part of the period that we allow rt tasks to run in us.
  * default: 0.95s
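
A note on the new flag: __read_mostly places the variable in a separate data
section so it does not share cache lines with frequently written data, which
matters because cpu_clock() reads the flag on every call while it is written
exactly once during boot. A minimal sketch of how such an annotation is
typically defined (the exact section name varies across kernel versions, so
treat it as an assumption for illustration):

	/* Sketch only: the section name is an assumption, not necessarily
	 * this tree's exact definition of __read_mostly. */
	#define __read_mostly __attribute__((__section__(".data.read_mostly")))

	static __read_mostly int scheduler_running;	/* 0 until sched_init() finishes */
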
@@ -689,14 +691,16 @@ unsigned long long cpu_clock(int cpu)
 	unsigned long flags;
 	struct rq *rq;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
 	/*
 	 * Only call sched_clock() if the scheduler has already been
 	 * initialized (some code might call cpu_clock() very early):
 	 */
-	if (rq->idle)
-		update_rq_clock(rq);
+	if (unlikely(!scheduler_running))
+		return 0;
+
+	local_irq_save(flags);
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
 	now = rq->clock;
 	local_irq_restore(flags);
 
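The fix follows a general pattern: test a cheap "subsystem is up" flag before
touching state that early boot code may not have initialized, instead of
inferring readiness from a side effect such as rq->idle being set. Returning 0
early also skips the IRQ save/restore and the runqueue access entirely on the
early path. A self-contained userspace sketch of the same pattern (all names
are invented for illustration; unlikely() is mapped onto GCC's branch hint, as
in the kernel):

	#include <stdio.h>

	#define unlikely(x)	__builtin_expect(!!(x), 0)

	static int subsystem_running;		/* written once at init, read on every call */
	static unsigned long long clock_value;	/* stand-in for rq->clock */

	/* Analogue of cpu_clock(): callers that run before init get a
	 * well-defined 0 instead of reading uninitialized state. */
	static unsigned long long read_clock(void)
	{
		if (unlikely(!subsystem_running))
			return 0;
		return ++clock_value;
	}

	static void subsystem_init(void)
	{
		clock_value = 1000;	/* stand-in for the real setup work */
		subsystem_running = 1;	/* publish readiness only when done */
	}

	int main(void)
	{
		printf("before init: %llu\n", read_clock());	/* prints 0 */
		subsystem_init();
		printf("after init:  %llu\n", read_clock());	/* prints 1001 */
		return 0;
	}
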
@@ -7284,6 +7288,8 @@ void __init sched_init(void)
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
+
+	scheduler_running = 1;
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
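
Setting scheduler_running as the last statement of sched_init() is what makes
the guard in cpu_clock() safe: any caller running before that point sees the
flag as 0 and takes the harmless early return. Since sched_init() runs on the
boot CPU before the other CPUs are brought up, a plain store suffices here;
the worst a very early caller can observe is a 0 flag, which yields exactly
the defined early-boot return value of 0.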