@@ -526,6 +526,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
+		se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -553,8 +554,10 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime ||
+			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
 		resched_task(rq_of(cfs_rq)->curr);
+	curr->peer_preempt = 0;
 }
 
 static void
@@ -839,8 +842,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		if (unlikely(se->load.weight != NICE_0_LOAD))
 			gran = calc_delta_fair(gran, &se->load);
 
-		if (delta > gran)
-			resched_task(curr);
+		if (delta > gran) {
+			int now = !sched_feat(PREEMPT_RESTRICT);
+
+			if (now || p->prio < curr->prio || !se->peer_preempt++)
+				resched_task(curr);
+		}
 	}
 }
 
@@ -1034,6 +1041,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	check_spread(cfs_rq, curr);
 	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
+	se->peer_preempt = 0;
 	resched_task(rq->curr);
 }
 
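
The hunks above can be read as a single gate on wakeup preemption. What follows is a minimal, self-contained sketch of that gate, not kernel code: struct entity, preempt_restrict, wakeup_preempts() and tick_preempts() are invented stand-ins for sched_entity, sched_feat(PREEMPT_RESTRICT), check_preempt_wakeup() and check_preempt_tick(). With the feature enabled, a waking task of equal or lower priority preempts the running task only on the first wakeup in its slice; later wakeups merely mark the running entity via peer_preempt, and the tick path then forces the reschedule.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the scheduler state touched by the patch. */
struct entity {
	int prio;		/* lower value = higher priority */
	int peer_preempt;	/* set when a peer already asked to preempt us */
};

static bool preempt_restrict = true;	/* models sched_feat(PREEMPT_RESTRICT) */

/*
 * Wakeup path, mirroring the new logic in check_preempt_wakeup():
 * preempt immediately when the feature is off or the waking task 'p'
 * has higher priority; otherwise only the first peer wakeup in the
 * slice preempts, later ones just mark the running entity.
 */
static bool wakeup_preempts(struct entity *curr, struct entity *p)
{
	bool now = !preempt_restrict;

	if (now || p->prio < curr->prio || !curr->peer_preempt++)
		return true;
	return false;
}

/*
 * Tick path, mirroring check_preempt_tick(): a pending peer_preempt
 * mark forces a reschedule at the next tick even before the slice
 * expires, and the mark is cleared either way.
 */
static bool tick_preempts(struct entity *curr, unsigned long delta_exec,
			  unsigned long ideal_runtime)
{
	bool resched = delta_exec > ideal_runtime ||
		       (preempt_restrict && curr->peer_preempt);

	curr->peer_preempt = 0;
	return resched;
}

int main(void)
{
	struct entity curr = { .prio = 120, .peer_preempt = 0 };
	struct entity peer = { .prio = 120 };

	printf("first wakeup preempts:  %d\n", wakeup_preempts(&curr, &peer));
	printf("second wakeup preempts: %d\n", wakeup_preempts(&curr, &peer));
	printf("next tick reschedules:  %d\n", tick_preempts(&curr, 1000, 4000));
	return 0;
}

Built with any C99 compiler, the three lines should print 1, 0, 1: the first wakeup preempts immediately, the second is deferred, and the deferred request is honoured at the next tick.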