@@ -894,7 +894,7 @@ static inline int __normal_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+	bonus = 0;
 
 	prio = p->static_prio - bonus;
 	if (prio < MAX_RT_PRIO)
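With the estimator on its way out, __normal_prio() collapses to the clamped static priority: the bonus term is pinned to 0 instead of CURRENT_BONUS(p) - MAX_BONUS / 2, which used to shift a task by up to MAX_BONUS / 2 priority levels in either direction based on its sleep average. A minimal sketch of that old mapping follows, reconstructed from 2.6-era kernel/sched.c; the current_bonus() helper and the 1-second NS_MAX_SLEEP_AVG value are assumptions for the demo, not code from this patch.

#include <stdio.h>

#define MAX_BONUS        10              /* MAX_USER_PRIO * PRIO_BONUS_RATIO / 100 in 2.6 */
#define NS_MAX_SLEEP_AVG 1000000000ULL   /* assumed 1s cap, demo value only */

/* Hedged stand-in for the old CURRENT_BONUS(p) macro: a linear map of
 * the capped sleep average into 0..MAX_BONUS. */
static int current_bonus(unsigned long long sleep_avg)
{
	return (int)(sleep_avg * MAX_BONUS / NS_MAX_SLEEP_AVG);
}

int main(void)
{
	unsigned long long avgs[] = { 0, NS_MAX_SLEEP_AVG / 2, NS_MAX_SLEEP_AVG };

	for (int i = 0; i < 3; i++) {
		/* The line this hunk deletes: bonus = CURRENT_BONUS(p) - MAX_BONUS / 2; */
		int bonus = current_bonus(avgs[i]) - MAX_BONUS / 2;
		printf("sleep_avg=%10llu ns -> bonus %+d (prio = static_prio - bonus)\n",
		       avgs[i], bonus);
	}
	return 0;
}

A pure CPU hog (sleep_avg near 0) got a +5 penalty, a heavy sleeper up to a -5 boost; after this patch every task behaves as if its bonus were 0.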
@@ -970,42 +970,6 @@ static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
  */
 static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
-	/* Caller must always ensure 'now >= p->timestamp' */
-	unsigned long sleep_time = now - p->timestamp;
-
-	if (batch_task(p))
-		sleep_time = 0;
-
-	if (likely(sleep_time > 0)) {
-		/*
-		 * This ceiling is set to the lowest priority that would allow
-		 * a task to be reinserted into the active array on timeslice
-		 * completion.
-		 */
-		unsigned long ceiling = INTERACTIVE_SLEEP(p);
-
-		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
-			/*
-			 * Prevents user tasks from achieving best priority
-			 * with one single large enough sleep.
-			 */
-			p->sleep_avg = ceiling;
-		} else {
-			/*
-			 * This code gives a bonus to interactive tasks.
-			 *
-			 * The boost works by updating the 'average sleep time'
-			 * value here, based on ->timestamp. The more time a
-			 * task spends sleeping, the higher the average gets -
-			 * and the higher the priority boost gets as well.
-			 */
-			p->sleep_avg += sleep_time;
-
-		}
-		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
-			p->sleep_avg = NS_MAX_SLEEP_AVG;
-	}
-
 	return effective_prio(p);
 }
 
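This hunk guts recalc_task_prio(): with sleep time no longer folded into p->sleep_avg, the function reduces to a call to effective_prio(p). The removed update had two branches: a clamp that kept a user task (p->mm set) from reaching the INTERACTIVE_SLEEP() ceiling in one single large sleep, and the plain accumulation path; either way the result was capped at NS_MAX_SLEEP_AVG. A minimal userspace model of that removed update follows; the cap and ceiling values and the update_sleep_avg() helper are illustrative assumptions, since in the kernel the ceiling came from INTERACTIVE_SLEEP(p) and varied with static priority.

#include <stdio.h>

#define NS_MAX_SLEEP_AVG 1000000000ULL   /* assumed cap, in ns */
#define CEILING           800000000ULL   /* stand-in for INTERACTIVE_SLEEP(p) */

static unsigned long long
update_sleep_avg(unsigned long long sleep_avg, unsigned long long sleep_time,
		 int has_mm /* user task, i.e. p->mm != NULL */)
{
	if (sleep_time == 0)
		return sleep_avg;          /* batch tasks had sleep_time forced to 0 */

	if (has_mm && sleep_time > CEILING && sleep_avg < CEILING)
		sleep_avg = CEILING;       /* one huge sleep can't max out the boost */
	else
		sleep_avg += sleep_time;   /* normal interactivity accumulation */

	if (sleep_avg > NS_MAX_SLEEP_AVG)
		sleep_avg = NS_MAX_SLEEP_AVG;
	return sleep_avg;
}

int main(void)
{
	/* A 10s sleep is clamped to the ceiling; short sleeps accumulate. */
	printf("%llu\n", update_sleep_avg(0, 10000000000ULL, 1));
	printf("%llu\n", update_sleep_avg(300000000ULL, 200000000ULL, 1));
	return 0;
}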
@@ -3560,9 +3524,6 @@ switch_tasks:
 	clear_tsk_need_resched(prev);
 	rcu_qsctr_inc(task_cpu(prev));
 
-	prev->sleep_avg -= run_time;
-	if ((long)prev->sleep_avg <= 0)
-		prev->sleep_avg = 0;
 	prev->timestamp = prev->last_ran = now;
 
 	sched_info_switch(prev, next);
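The decay half of the estimator goes too: schedule() used to charge the outgoing task's run_time against its sleep_avg at every context switch, so time spent on the CPU drained the boost that sleeping had built up. With neither the boost (previous hunk) nor the decay updating it, sleep_avg is now dead state.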
@@ -4204,11 +4165,6 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	/*
-	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
-	 */
-	if (policy == SCHED_BATCH)
-		p->sleep_avg = 0;
 	set_load_weight(p);
 }
 
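__setscheduler() loses its special case as well: SCHED_BATCH tasks were treated as perpetual CPU hogs by zeroing sleep_avg, but now that nothing reads the field there is nothing to zero.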
@@ -4931,7 +4887,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	unsigned long flags;
 
 	idle->timestamp = sched_clock();
-	idle->sleep_avg = 0;
 	idle->array = NULL;
 	idle->prio = idle->normal_prio = MAX_PRIO;
 	idle->state = TASK_RUNNING;
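Finally, init_idle() drops the last explicit write to sleep_avg; with no remaining readers or writers, the field itself is presumably removable from task_struct in a follow-up cleanup.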