@@ -74,16 +74,6 @@
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
-{
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
-}
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -242,6 +232,12 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
 }
 #endif
 
+/*
+ * sched_domains_mutex serializes calls to arch_init_sched_domains,
+ * detach_destroy_domains and partition_sched_domains.
+ */
+static DEFINE_MUTEX(sched_domains_mutex);
+
 #ifdef CONFIG_GROUP_SCHED
 
 #include <linux/cgroup.h>
@@ -308,9 +304,6 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
-/* doms_cur_mutex serializes access to doms_cur[] array */
-static DEFINE_MUTEX(doms_cur_mutex);
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
@@ -318,7 +311,13 @@ static DEFINE_MUTEX(doms_cur_mutex);
 # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
 #endif
 
+/*
+ * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
 #define MIN_SHARES	2
+#define MAX_SHARES	(ULONG_MAX - 1)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
@@ -358,21 +357,9 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
-static inline void lock_doms_cur(void)
-{
-	mutex_lock(&doms_cur_mutex);
-}
-
-static inline void unlock_doms_cur(void)
-{
-	mutex_unlock(&doms_cur_mutex);
-}
-
 #else
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline void lock_doms_cur(void) { }
-static inline void unlock_doms_cur(void) { }
 
 #endif	/* CONFIG_GROUP_SCHED */
 
@@ -560,13 +547,7 @@ struct rq {
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
-	u64 clock, prev_clock_raw;
-	s64 clock_max_delta;
-
-	unsigned int clock_warps, clock_overflows, clock_underflows;
-	u64 idle_clock;
-	unsigned int clock_deep_idle_events;
-	u64 tick_timestamp;
+	u64 clock;
 
 	atomic_t nr_iowait;
 
@@ -631,82 +612,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-#ifdef CONFIG_NO_HZ
-static inline bool nohz_on(int cpu)
-{
-	return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
-}
-
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-	return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-	rq->last_tick_seen = jiffies;
-}
-#else
-static inline u64 max_skipped_ticks(struct rq *rq)
-{
-	return 1;
-}
-
-static inline void update_last_tick_seen(struct rq *rq)
-{
-}
-#endif
-
-/*
- * Update the per-runqueue clock, as finegrained as the platform can give
- * us, but without assuming monotonicity, etc.:
- */
-static void __update_rq_clock(struct rq *rq)
-{
-	u64 prev_raw = rq->prev_clock_raw;
-	u64 now = sched_clock();
-	s64 delta = now - prev_raw;
-	u64 clock = rq->clock;
-
-#ifdef CONFIG_SCHED_DEBUG
-	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-#endif
-	/*
-	 * Protect against sched_clock() occasionally going backwards:
-	 */
-	if (unlikely(delta < 0)) {
-		clock++;
-		rq->clock_warps++;
-	} else {
-		/*
-		 * Catch too large forward jumps too:
-		 */
-		u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
-		u64 max_time = rq->tick_timestamp + max_jump;
-
-		if (unlikely(clock + delta > max_time)) {
-			if (clock < max_time)
-				clock = max_time;
-			else
-				clock++;
-			rq->clock_overflows++;
-		} else {
-			if (unlikely(delta > rq->clock_max_delta))
-				rq->clock_max_delta = delta;
-			clock += delta;
-		}
-	}
-
-	rq->prev_clock_raw = now;
-	rq->clock = clock;
-}
-
-static void update_rq_clock(struct rq *rq)
-{
-	if (likely(smp_processor_id() == cpu_of(rq)))
-		__update_rq_clock(rq);
-}
-
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -722,6 +627,11 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+static inline void update_rq_clock(struct rq *rq)
+{
+	rq->clock = sched_clock_cpu(cpu_of(rq));
+}
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
@@ -757,14 +667,14 @@ const_debug unsigned int sysctl_sched_features =
 #define SCHED_FEAT(name, enabled)	\
 	#name ,
 
-__read_mostly char *sched_feat_names[] = {
+static __read_mostly char *sched_feat_names[] = {
 #include "sched_features.h"
 	NULL
 };
 
 #undef SCHED_FEAT
 
-int sched_feat_open(struct inode *inode, struct file *filp)
+static int sched_feat_open(struct inode *inode, struct file *filp)
 {
 	filp->private_data = inode->i_private;
 	return 0;
@@ -899,7 +809,7 @@ static inline u64 global_rt_runtime(void)
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
-static const unsigned long long time_sync_thresh = 100000;
+unsigned long long time_sync_thresh = 100000;
 
 static DEFINE_PER_CPU(unsigned long long, time_offset);
 static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
@@ -913,11 +823,14 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&time_sync_lock, flags);
+	/*
+	 * We want this inlined, to not get tracer function calls
+	 * in this critical section:
+	 */
+	spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+	__raw_spin_lock(&time_sync_lock.raw_lock);
 
 	if (time < prev_global_time) {
 		per_cpu(time_offset, cpu) += prev_global_time - time;
@@ -926,7 +839,8 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 		prev_global_time = time;
 	}
 
-	spin_unlock_irqrestore(&time_sync_lock, flags);
+	__raw_spin_unlock(&time_sync_lock.raw_lock);
+	spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
 	return time;
 }
@@ -934,8 +848,6 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
 	unsigned long long now;
-	unsigned long flags;
-	struct rq *rq;
 
 	/*
 	 * Only call sched_clock() if the scheduler has already been
@@ -944,11 +856,7 @@ static unsigned long long __cpu_clock(int cpu)
 	if (unlikely(!scheduler_running))
 		return 0;
 
-	local_irq_save(flags);
-	rq = cpu_rq(cpu);
-	update_rq_clock(rq);
-	now = rq->clock;
-	local_irq_restore(flags);
+	now = sched_clock_cpu(cpu);
 
 	return now;
 }
@@ -960,13 +868,18 @@ static unsigned long long __cpu_clock(int cpu)
 unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long prev_cpu_time, time, delta_time;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
 	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
 	delta_time = time-prev_cpu_time;
 
-	if (unlikely(delta_time > time_sync_thresh))
+	if (unlikely(delta_time > time_sync_thresh)) {
 		time = __sync_cpu_clock(time, cpu);
+		per_cpu(prev_cpu_time, cpu) = time;
+	}
+	local_irq_restore(flags);
 
 	return time;
 }
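A note on the cpu_clock() flow above: each CPU keeps a private time_offset and only enters the global sync path once its reading has drifted more than time_sync_thresh past the last value it handed out; the sync path clamps the reading against a global high-water mark. The standalone C sketch below models that idea only and is not kernel code: NR_CPUS, SYNC_THRESH, read_raw_clock() and my_cpu_clock() are made-up stand-ins, and the irq-disabling and raw spinlock from the hunks above are omitted.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4		/* placeholder CPU count */
#define SYNC_THRESH	100000ULL	/* plays the role of time_sync_thresh */

static uint64_t time_offset[NR_CPUS];	/* per-CPU correction */
static uint64_t prev_cpu_time[NR_CPUS];	/* last synced value per CPU */
static uint64_t prev_global_time;	/* global high-water mark */

/* Placeholder clock source: CPU 0 ticks slowest, higher CPUs run faster. */
static uint64_t read_raw_clock(int cpu)
{
	static uint64_t fake_ns[NR_CPUS];

	return fake_ns[cpu] += 150000 + 50000 * cpu;
}

/* The __sync_cpu_clock() idea: clamp against the global high-water mark. */
static uint64_t sync_cpu_clock(uint64_t time, int cpu)
{
	if (time < prev_global_time) {
		/* This CPU lags: remember by how much, report global time. */
		time_offset[cpu] += prev_global_time - time;
		time = prev_global_time;
	} else {
		/* This CPU leads: it becomes the new high-water mark. */
		prev_global_time = time;
	}
	return time;
}

/* The cpu_clock() idea: cheap per-CPU read, occasional global resync. */
static uint64_t my_cpu_clock(int cpu)
{
	uint64_t time = read_raw_clock(cpu) + time_offset[cpu];

	if (time - prev_cpu_time[cpu] > SYNC_THRESH) {
		time = sync_cpu_clock(time, cpu);
		prev_cpu_time[cpu] = time;
	}
	return time;
}

int main(void)
{
	for (int round = 0; round < 3; round++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu%d: %llu\n", cpu,
			       (unsigned long long)my_cpu_clock(cpu));
	return 0;
}

Running it shows the slower CPU 0 picking up an offset once the faster CPUs have pushed the global high-water mark past its raw reading, which is the behaviour the per-CPU time_offset in the patch exists to provide.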
@@ -1117,43 +1030,6 @@ static struct rq *this_rq_lock(void)
 	return rq;
 }
 
-/*
- * We are going deep-idle (irqs are disabled):
- */
-void sched_clock_idle_sleep_event(void)
-{
-	struct rq *rq = cpu_rq(smp_processor_id());
-
-	spin_lock(&rq->lock);
-	__update_rq_clock(rq);
-	spin_unlock(&rq->lock);
-	rq->clock_deep_idle_events++;
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
-
-/*
- * We just idled delta nanoseconds (called with irqs disabled):
- */
-void sched_clock_idle_wakeup_event(u64 delta_ns)
-{
-	struct rq *rq = cpu_rq(smp_processor_id());
-	u64 now = sched_clock();
-
-	rq->idle_clock += delta_ns;
-	/*
-	 * Override the previous timestamp and ignore all
-	 * sched_clock() deltas that occured while we idled,
-	 * and use the PM-provided delta_ns to advance the
-	 * rq clock:
-	 */
-	spin_lock(&rq->lock);
-	rq->prev_clock_raw = now;
-	rq->clock += delta_ns;
-	spin_unlock(&rq->lock);
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
-
 static void __resched_task(struct task_struct *p, int tif_bit);
 
 static inline void resched_task(struct task_struct *p)
@@ -1189,6 +1065,7 @@ static inline void resched_rq(struct rq *rq)
 enum {
 	HRTICK_SET,		/* re-programm hrtick_timer */
 	HRTICK_RESET,		/* not a new slice */
+	HRTICK_BLOCK,		/* stop hrtick operations */
 };
 
 /*
@@ -1200,6 +1077,8 @@ static inline int hrtick_enabled(struct rq *rq)
 {
 	if (!sched_feat(HRTICK))
 		return 0;
+	if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags)))
+		return 0;
 	return hrtimer_is_hres_active(&rq->hrtick_timer);
 }
 
@@ -1275,14 +1154,70 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
 	spin_lock(&rq->lock);
-	__update_rq_clock(rq);
+	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
 	spin_unlock(&rq->lock);
 
 	return HRTIMER_NORESTART;
 }
 
-static inline void init_rq_hrtick(struct rq *rq)
+static void hotplug_hrtick_disable(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	rq->hrtick_flags = 0;
+	__set_bit(HRTICK_BLOCK, &rq->hrtick_flags);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	hrtick_clear(rq);
+}
+
+static void hotplug_hrtick_enable(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	__clear_bit(HRTICK_BLOCK, &rq->hrtick_flags);
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static int
+hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+	int cpu = (int)(long)hcpu;
+
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		hotplug_hrtick_disable(cpu);
+		return NOTIFY_OK;
+
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hotplug_hrtick_enable(cpu);
+		return NOTIFY_OK;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void init_hrtick(void)
+{
+	hotcpu_notifier(hotplug_hrtick, 0);
+}
+
+static void init_rq_hrtick(struct rq *rq)
 {
 	rq->hrtick_flags = 0;
 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1319,6 +1254,10 @@ static inline void init_rq_hrtick(struct rq *rq)
 void hrtick_resched(void)
 {
 }
+
+static inline void init_hrtick(void)
+{
+}
 #endif
 
 /*
@@ -1438,8 +1377,8 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
 	u64 tmp;
 
-	if (unlikely(!lw->inv_weight))
-		lw->inv_weight = (WMULT_CONST-lw->weight/2) / (lw->weight+1);
+	if (!lw->inv_weight)
+		lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
 
 	tmp = (u64)delta_exec * weight;
 	/*
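The calc_delta_mine() change above works together with the se->load.inv_weight = 0 initializations later in this patch: the reciprocal of the weight is now recomputed lazily on first use, and the added "1 +" keeps it non-zero even for weights as large as MAX_SHARES. The sketch below is a rough standalone model of that fixed-point scaling, with RECIP_CONST, RECIP_SHIFT and scale_delta() as made-up stand-ins for the real WMULT_CONST/WMULT_SHIFT machinery, and without the 64-bit overflow handling calc_delta_mine() itself has:

#include <stdint.h>
#include <stdio.h>

/* Assumed fixed-point parameters, standing in for WMULT_CONST/WMULT_SHIFT. */
#define RECIP_CONST	0xffffffffULL
#define RECIP_SHIFT	32

struct load_weight {
	unsigned long weight;
	unsigned long inv_weight;	/* ~2^32 / weight, filled in lazily */
};

/*
 * Scale delta_exec by weight/lw->weight using the cached reciprocal:
 * (delta * weight * inv_weight) >> 32 approximates delta * weight / lw->weight.
 */
static uint64_t scale_delta(uint64_t delta_exec, unsigned long weight,
			    struct load_weight *lw)
{
	if (!lw->inv_weight)
		lw->inv_weight = 1 + (RECIP_CONST - lw->weight/2) / (lw->weight + 1);

	return (delta_exec * weight * lw->inv_weight) >> RECIP_SHIFT;
}

int main(void)
{
	struct load_weight lw = { .weight = 1024, .inv_weight = 0 };

	/* nice-0 entity (weight 1024) against a queue weight of 1024: ~1:1 */
	printf("%llu\n", (unsigned long long)scale_delta(1000000, 1024, &lw));
	return 0;
}

For the default weight of 1024 the cached reciprocal comes out near 2^32/1024, so a nice-0 entity running against a nice-0 queue gets back roughly the delta it consumed, minus a small rounding error.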
@@ -1748,6 +1687,8 @@ __update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd,
 
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
+	else if (shares > MAX_SHARES)
+		shares = MAX_SHARES;
 
 	__set_se_shares(tg->se[tcpu], shares);
 }
@@ -4339,8 +4280,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
-	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
-		return account_guest_time(p, cputime);
+	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
+		account_guest_time(p, cputime);
+		return;
+	}
 
 	p->stime = cputime_add(p->stime, cputime);
 
@@ -4404,19 +4347,11 @@ void scheduler_tick(void)
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
-	u64 next_tick = rq->tick_timestamp + TICK_NSEC;
+
+	sched_clock_tick();
 
 	spin_lock(&rq->lock);
-	__update_rq_clock(rq);
-	/*
-	 * Let rq->clock advance by at least TICK_NSEC:
-	 */
-	if (unlikely(rq->clock < next_tick)) {
-		rq->clock = next_tick;
-		rq->clock_underflows++;
-	}
-	rq->tick_timestamp = rq->clock;
-	update_last_tick_seen(rq);
+	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
@@ -4570,7 +4505,7 @@ need_resched_nonpreemptible:
 	 * Do the rq-clock update outside the rq lock:
 	 */
 	local_irq_disable();
-	__update_rq_clock(rq);
+	update_rq_clock(rq);
 	spin_lock(&rq->lock);
 	clear_tsk_need_resched(prev);
 
@@ -4595,9 +4530,9 @@ need_resched_nonpreemptible:
 	prev->sched_class->put_prev_task(rq, prev);
 	next = pick_next_task(rq, prev);
 
-	sched_info_switch(prev, next);
-
 	if (likely(prev != next)) {
+		sched_info_switch(prev, next);
+
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
@@ -7755,7 +7690,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 {
 	int i, j;
 
-	lock_doms_cur();
+	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
@@ -7804,7 +7739,7 @@ match2:
 
 	register_sched_domain_sysctl();
 
-	unlock_doms_cur();
+	mutex_unlock(&sched_domains_mutex);
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -7813,8 +7748,10 @@ int arch_reinit_sched_domains(void)
 	int err;
 
 	get_online_cpus();
+	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
+	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 
 	return err;
@@ -7932,13 +7869,16 @@ void __init sched_init_smp(void)
 	BUG_ON(sched_group_nodes_bycpu == NULL);
 #endif
 	get_online_cpus();
+	mutex_lock(&sched_domains_mutex);
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
+	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
+	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
@@ -8025,7 +7965,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = 0;
 	se->parent = parent;
 }
 #endif
@@ -8149,8 +8089,6 @@ void __init sched_init(void)
 		spin_lock_init(&rq->lock);
 		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
-		rq->clock = 1;
-		update_last_tick_seen(rq);
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8294,6 +8232,7 @@ EXPORT_SYMBOL(__might_sleep);
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
 	int on_rq;
+
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	if (on_rq)
@@ -8325,7 +8264,6 @@ void normalize_rt_tasks(void)
 		p->se.sleep_start	= 0;
 		p->se.block_start	= 0;
 #endif
-		task_rq(p)->clock		= 0;
 
 		if (!rt_task(p)) {
 			/*
@@ -8692,7 +8630,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 		dequeue_entity(cfs_rq, se, 0);
 
 	se->load.weight = shares;
-	se->load.inv_weight = div64_u64((1ULL<<32), shares);
+	se->load.inv_weight = 0;
 
 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8722,13 +8660,10 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (!tg->se[0])
 		return -EINVAL;
 
-	/*
-	 * A weight of 0 or 1 can cause arithmetics problems.
-	 * (The default weight is 1024 - so there's no practical
-	 * limitation from this.)
-	 */
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
+	else if (shares > MAX_SHARES)
+		shares = MAX_SHARES;
 
 	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
@@ -8753,7 +8688,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		 * force a rebalance
 		 */
 		cfs_rq_set_shares(tg->cfs_rq[i], 0);
-		set_se_shares(tg->se[i], shares/nr_cpu_ids);
+		set_se_shares(tg->se[i], shares);
 	}
 
 	/*