@@ -74,16 +74,6 @@
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
-        return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
-}
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
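For reference, the default being dropped here resolves time only to jiffy granularity: each jiffy is worth NSEC_PER_SEC / HZ nanoseconds, so with HZ=250 the clock advances in 4 ms steps. A small userspace illustration of that arithmetic (the HZ value is assumed for the example, not taken from any particular config):

    #include <stdint.h>
    #include <stdio.h>

    #define HZ              250             /* assumed config value for the example */
    #define NSEC_PER_SEC    1000000000ULL

    /* Same arithmetic as the removed weak sched_clock() default. */
    static uint64_t jiffies_clock(uint64_t jiffies)
    {
            return jiffies * (NSEC_PER_SEC / HZ);
    }

    int main(void)
    {
            printf("1 jiffy  = %llu ns\n", (unsigned long long)jiffies_clock(1));
            printf("HZ ticks = %llu ns\n", (unsigned long long)jiffies_clock(HZ));
            return 0;
    }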
@@ -557,13 +547,7 @@ struct rq {
         unsigned long next_balance;
         struct mm_struct *prev_mm;
 
-        u64 clock, prev_clock_raw;
-        s64 clock_max_delta;
-
-        unsigned int clock_warps, clock_overflows, clock_underflows;
-        u64 idle_clock;
-        unsigned int clock_deep_idle_events;
-        u64 tick_timestamp;
+        u64 clock;
 
         atomic_t nr_iowait;
 
@@ -628,82 +612,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-#ifdef CONFIG_NO_HZ
-static inline bool nohz_on(int cpu)
-{
-        return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
-}
-
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-        return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-        rq->last_tick_seen = jiffies;
-}
-#else
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-        return 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-}
-#endif
-
-/*
- * Update the per-runqueue clock, as finegrained as the platform can give
- * us, but without assuming monotonicity, etc.:
- */
-static void __update_rq_clock(struct rq *rq)
-{
-        u64 prev_raw = rq->prev_clock_raw;
-        u64 now = sched_clock();
-        s64 delta = now - prev_raw;
-        u64 clock = rq->clock;
-
-#ifdef CONFIG_SCHED_DEBUG
-        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-#endif
-        /*
-         * Protect against sched_clock() occasionally going backwards:
-         */
-        if (unlikely(delta < 0)) {
-                clock++;
-                rq->clock_warps++;
-        } else {
-                /*
-                 * Catch too large forward jumps too:
-                 */
-                u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
-                u64 max_time = rq->tick_timestamp + max_jump;
-
-                if (unlikely(clock + delta > max_time)) {
-                        if (clock < max_time)
-                                clock = max_time;
-                        else
-                                clock++;
-                        rq->clock_overflows++;
-                } else {
-                        if (unlikely(delta > rq->clock_max_delta))
-                                rq->clock_max_delta = delta;
-                        clock += delta;
-                }
-        }
-
-        rq->prev_clock_raw = now;
-        rq->clock = clock;
-}
-
-static void update_rq_clock(struct rq *rq)
-{
-        if (likely(smp_processor_id() == cpu_of(rq)))
-                __update_rq_clock(rq);
-}
-
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
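The filter deleted above passed raw sched_clock() deltas through two guards: a negative delta only nudged the clock forward by one nanosecond, and a forward jump was capped at max_skipped_ticks() * TICK_NSEC past the last tick timestamp. A minimal userspace sketch of that clamping, with the runqueue state reduced to plain fields, max_skipped_ticks taken as 1, and made-up sample values; it is illustrative only, not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    #define TICK_NSEC 1000000ULL            /* assumes HZ=1000 for the example */

    struct fake_rq {
            uint64_t clock;                 /* filtered clock, ns */
            uint64_t prev_clock_raw;        /* last raw sample, ns */
            uint64_t tick_timestamp;        /* clock value at the last tick, ns */
    };

    /* Mirrors the removed __update_rq_clock() clamping (counters omitted). */
    static void filter_clock(struct fake_rq *rq, uint64_t raw_now)
    {
            int64_t delta = (int64_t)(raw_now - rq->prev_clock_raw);
            uint64_t max_time = rq->tick_timestamp + TICK_NSEC;

            if (delta < 0)                          /* raw clock went backwards */
                    rq->clock++;
            else if (rq->clock + delta > max_time)  /* jump too large: clamp */
                    rq->clock = rq->clock < max_time ? max_time : rq->clock + 1;
            else                                    /* normal case: advance by delta */
                    rq->clock += delta;

            rq->prev_clock_raw = raw_now;
    }

    int main(void)
    {
            struct fake_rq rq = { 0, 0, 0 };
            uint64_t samples[] = { 300000, 200000, 5000000, 350000 };   /* ns, made up */

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    filter_clock(&rq, samples[i]);
                    printf("raw=%llu -> clock=%llu\n",
                           (unsigned long long)samples[i], (unsigned long long)rq.clock);
            }
            return 0;
    }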
@@ -719,6 +627,11 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)              cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 
+static inline void update_rq_clock(struct rq *rq)
+{
+        rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
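With this change the runqueue no longer filters time itself: update_rq_clock() simply caches whatever sched_clock_cpu() reports for the runqueue's CPU, and callers keep sampling it at well-defined points such as the tick, the hrtick and before scheduling decisions. A rough userspace model of that split, using clock_gettime() as a stand-in for sched_clock_cpu(); the names and behavior here are assumptions for illustration, not the kernel interface:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct fake_rq {
            int cpu;
            uint64_t clock;         /* ns, cached at well-defined points */
    };

    /* Stand-in for sched_clock_cpu(): a per-CPU nanosecond timestamp. */
    static uint64_t fake_sched_clock_cpu(int cpu)
    {
            struct timespec ts;

            (void)cpu;      /* a real implementation would keep per-CPU state */
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* Same shape as the new inline: sample once, cache in the runqueue. */
    static void fake_update_rq_clock(struct fake_rq *rq)
    {
            rq->clock = fake_sched_clock_cpu(rq->cpu);
    }

    int main(void)
    {
            struct fake_rq rq = { 0, 0 };

            fake_update_rq_clock(&rq);      /* e.g. at a tick or before scheduling */
            printf("rq->clock = %llu ns\n", (unsigned long long)rq.clock);
            return 0;
    }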
@@ -935,7 +848,6 @@ static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
         unsigned long long now;
-        struct rq *rq;
 
         /*
          * Only call sched_clock() if the scheduler has already been
@@ -944,9 +856,7 @@ static unsigned long long __cpu_clock(int cpu)
         if (unlikely(!scheduler_running))
                 return 0;
 
-        rq = cpu_rq(cpu);
-        update_rq_clock(rq);
-        now = rq->clock;
+        now = sched_clock_cpu(cpu);
 
         return now;
 }
@@ -1120,45 +1030,6 @@ static struct rq *this_rq_lock(void)
         return rq;
 }
 
-/*
- * We are going deep-idle (irqs are disabled):
- */
-void sched_clock_idle_sleep_event(void)
-{
-        struct rq *rq = cpu_rq(smp_processor_id());
-
-        WARN_ON(!irqs_disabled());
-        spin_lock(&rq->lock);
-        __update_rq_clock(rq);
-        spin_unlock(&rq->lock);
-        rq->clock_deep_idle_events++;
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
-
-/*
- * We just idled delta nanoseconds (called with irqs disabled):
- */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
-{
-        struct rq *rq = cpu_rq(smp_processor_id());
-        u64 now = sched_clock();
-
-        WARN_ON(!irqs_disabled());
-        rq->idle_clock += delta_ns;
-        /*
-         * Override the previous timestamp and ignore all
-         * sched_clock() deltas that occured while we idled,
-         * and use the PM-provided delta_ns to advance the
-         * rq clock:
-         */
-        spin_lock(&rq->lock);
-        rq->prev_clock_raw = now;
-        rq->clock += delta_ns;
-        spin_unlock(&rq->lock);
-        touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-
 static void __resched_task(struct task_struct *p, int tif_bit);
 
 static inline void resched_task(struct task_struct *p)
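The deleted idle hooks capture one detail of the old model worth noting: on wakeup from deep idle, the raw sched_clock() delta that accumulated while idle was discarded and the PM-provided delta_ns advanced rq->clock instead. If these events are still needed, the equivalent bookkeeping presumably has to happen wherever the per-CPU clock state now lives; a stripped-down sketch of just that wakeup-side arithmetic on plain fields (hypothetical names, not the replacement code):

    #include <stdint.h>
    #include <stdio.h>

    struct clk_state {
            uint64_t clock;                 /* filtered scheduler clock, ns */
            uint64_t prev_clock_raw;        /* last raw sched_clock() sample, ns */
    };

    /*
     * Wakeup from deep idle: forget the raw delta that accumulated while
     * idle and trust the PM-reported sleep length instead, as the removed
     * sched_clock_idle_wakeup_event() did.
     */
    static void idle_wakeup(struct clk_state *cs, uint64_t raw_now, uint64_t delta_ns)
    {
            cs->prev_clock_raw = raw_now;   /* discard the idle-time raw delta */
            cs->clock += delta_ns;          /* advance by the PM-provided amount */
    }

    int main(void)
    {
            struct clk_state cs = { 1000000, 1000000 };

            /* Raw clock read 9 ms after sleep began, firmware says we slept 5 ms. */
            idle_wakeup(&cs, 10000000, 5000000);
            printf("clock=%llu prev_raw=%llu\n",
                   (unsigned long long)cs.clock, (unsigned long long)cs.prev_clock_raw);
            return 0;
    }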
@@ -1283,7 +1154,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
         WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
         spin_lock(&rq->lock);
-        __update_rq_clock(rq);
+        update_rq_clock(rq);
         rq->curr->sched_class->task_tick(rq, rq->curr, 1);
         spin_unlock(&rq->lock);
 
@@ -4476,19 +4347,11 @@ void scheduler_tick(void)
         int cpu = smp_processor_id();
         struct rq *rq = cpu_rq(cpu);
         struct task_struct *curr = rq->curr;
-        u64 next_tick = rq->tick_timestamp + TICK_NSEC;
+
+        sched_clock_tick();
 
         spin_lock(&rq->lock);
-        __update_rq_clock(rq);
-        /*
-         * Let rq->clock advance by at least TICK_NSEC:
-         */
-        if (unlikely(rq->clock < next_tick)) {
-                rq->clock = next_tick;
-                rq->clock_underflows++;
-        }
-        rq->tick_timestamp = rq->clock;
-        update_last_tick_seen(rq);
+        update_rq_clock(rq);
         update_cpu_load(rq);
         curr->sched_class->task_tick(rq, curr, 0);
         spin_unlock(&rq->lock);
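The guard removed from scheduler_tick() forced rq->clock forward by at least TICK_NSEC per tick, counting a clock_underflow whenever the filtered clock had fallen behind the tick period; after this patch any such minimum advance is assumed to be handled behind sched_clock_cpu()/sched_clock_tick() rather than in the runqueue. A tiny sketch of the old guard in isolation, with TICK_NSEC taken for an assumed HZ=1000:

    #include <stdint.h>
    #include <stdio.h>

    #define TICK_NSEC 1000000ULL    /* assumes HZ=1000 */

    /* Old per-tick guard: never let the clock lag a full tick behind. */
    static uint64_t tick_clock(uint64_t clock, uint64_t *tick_timestamp,
                               unsigned int *underflows)
    {
            uint64_t next_tick = *tick_timestamp + TICK_NSEC;

            if (clock < next_tick) {
                    clock = next_tick;
                    (*underflows)++;
            }
            *tick_timestamp = clock;
            return clock;
    }

    int main(void)
    {
            uint64_t tick_timestamp = 0;
            unsigned int underflows = 0;

            /* Clock only advanced 0.3 ms since the last tick: forced to 1 ms. */
            uint64_t clock = tick_clock(300000, &tick_timestamp, &underflows);
            printf("clock=%llu underflows=%u\n", (unsigned long long)clock, underflows);
            return 0;
    }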
@@ -4642,7 +4505,7 @@ need_resched_nonpreemptible:
          * Do the rq-clock update outside the rq lock:
          */
         local_irq_disable();
-        __update_rq_clock(rq);
+        update_rq_clock(rq);
         spin_lock(&rq->lock);
         clear_tsk_need_resched(prev);
 
@@ -8226,8 +8089,6 @@ void __init sched_init(void)
                 spin_lock_init(&rq->lock);
                 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                 rq->nr_running = 0;
-                rq->clock = 1;
-                update_last_tick_seen(rq);
                 init_cfs_rq(&rq->cfs, rq);
                 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8371,6 +8232,7 @@ EXPORT_SYMBOL(__might_sleep);
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
         int on_rq;
+
         update_rq_clock(rq);
         on_rq = p->se.on_rq;
         if (on_rq)
@@ -8402,7 +8264,6 @@ void normalize_rt_tasks(void)
                 p->se.sleep_start = 0;
                 p->se.block_start = 0;
 #endif
-                task_rq(p)->clock = 0;
 
                 if (!rt_task(p)) {
                         /*