@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+static const struct sched_class fair_sched_class;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -334,7 +336,7 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= w / rw
+ * delta *= P[w / rw]
  */
 static inline unsigned long
 calc_delta_weight(unsigned long delta, struct sched_entity *se)
@@ -348,15 +350,13 @@ calc_delta_weight(unsigned long delta, struct sched_entity *se)
 }
 
 /*
- * delta *= rw / w
+ * delta /= w
  */
 static inline unsigned long
 calc_delta_fair(unsigned long delta, struct sched_entity *se)
 {
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				cfs_rq_of(se)->load.weight, &se->load);
-	}
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 
 	return delta;
 }
@@ -386,26 +386,26 @@ static u64 __sched_period(unsigned long nr_running)
  * We calculate the wall-time slice from the period by taking a part
  * proportional to the weight.
  *
- * s = p*w/rw
+ * s = p*P[w/rw]
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+	unsigned long nr_running = cfs_rq->nr_running;
+
+	if (unlikely(!se->on_rq))
+		nr_running++;
+
+	return calc_delta_weight(__sched_period(nr_running), se);
 }
 
 /*
  * We calculate the vruntime slice of a to be inserted task
  *
- * vs = s*rw/w = p
+ * vs = s/w
  */
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
-
-	if (!se->on_rq)
-		nr_running++;
-
-	return __sched_period(nr_running);
+	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
 /*
@@ -628,7 +628,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	 * stays open at the end.
 	 */
 	if (initial && sched_feat(START_DEBIT))
-		vruntime += sched_vslice_add(cfs_rq, se);
+		vruntime += sched_vslice(cfs_rq, se);
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
@@ -748,7 +748,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	struct rq *rq = rq_of(cfs_rq);
 	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
 		cfs_rq->pair_start = rq->clock;
 		return se;
 	}
@@ -849,11 +849,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		hrtick_start(rq, delta);
 	}
 }
+
+/*
+ * called from enqueue/dequeue and updates the hrtick when the
+ * current task is from our class and nr_running is low enough
+ * to matter.
+ */
+static void hrtick_update(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+
+	if (curr->sched_class != &fair_sched_class)
+		return;
+
+	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+		hrtick_start_fair(rq, curr);
+}
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 }
+
+static inline void hrtick_update(struct rq *rq)
+{
+}
 #endif
 
 /*
@@ -874,7 +894,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 		wakeup = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -896,7 +916,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 		sleep = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -1002,8 +1022,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 
-static const struct sched_class fair_sched_class;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group