@@ -385,20 +385,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 }
 #endif
 
-/*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				se->load.weight, &cfs_rq_of(se)->load);
-	}
-
-	return delta;
-}
-
 /*
  * delta /= w
  */
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
+	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-	if (unlikely(!se->on_rq))
-		nr_running++;
+	for_each_sched_entity(se) {
+		struct load_weight *load = &cfs_rq->load;
 
-	return calc_delta_weight(__sched_period(nr_running), se);
+		if (unlikely(!se->on_rq)) {
+			struct load_weight lw = cfs_rq->load;
+
+			update_load_add(&lw, se->load.weight);
+			load = &lw;
+		}
+		slice = calc_delta_mine(slice, se->load.weight, load);
+	}
+	return slice;
 }
 
 /*
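
For illustration (not part of the patch): a minimal userspace sketch of the
arithmetic the new sched_slice() performs for a flat, non-group hierarchy,
where for_each_sched_entity() visits exactly one level, i.e.
slice = period * w / (rw + w) for an entity of weight w that is not yet
queued on a runqueue of weight rw.  calc_delta() below is a simplified
stand-in for the kernel's fixed-point calc_delta_mine(), and the 20ms
period assumes the then-default sysctl_sched_latency with nr_running under
sched_nr_latency; treat it as a sketch, not kernel code.

#include <stdio.h>

#define NSEC_PER_MSEC	1000000ULL

/* simplified calc_delta_mine(): delta * weight / total, no fixed point */
static unsigned long long calc_delta(unsigned long long delta,
				     unsigned long weight,
				     unsigned long total)
{
	return delta * weight / total;
}

int main(void)
{
	/*
	 * Two nice-0 tasks (weight 1024 each) are already queued; 'se' is
	 * a third nice-0 entity being placed, so it is not on_rq yet.
	 */
	unsigned long rq_weight = 2 * 1024;	/* cfs_rq->load.weight */
	unsigned long se_weight = 1024;		/* se->load.weight */
	int se_on_rq = 0;

	/* period sized for nr_running + !on_rq = 3 entities (assumed 20ms) */
	unsigned long long slice = 20 * NSEC_PER_MSEC;

	/* mirrors the update_load_add(&lw, se->load.weight) branch */
	unsigned long load = rq_weight + (se_on_rq ? 0 : se_weight);

	slice = calc_delta(slice, se_weight, load);
	printf("slice = %llu ns (~%.2f ms)\n",
	       slice, (double)slice / NSEC_PER_MSEC);	/* ~6.67 ms */
	return 0;
}

This also shows why the patch copies cfs_rq->load into a local lw and adds
se's weight before scaling: an entity that is not yet queued must be counted
in both the period (nr_running + !se->on_rq) and the denominator, otherwise
its slice would be computed against a load that excludes the entity itself.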