@@ -2348,6 +2348,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
         if (unlikely(se == pse))
                 return;
 
+        /*
+         * This is possible from callers such as pull_task(), in which we
+         * unconditionally check_preempt_curr() after an enqueue (which may
+         * have led to a throttle).  This both saves work and prevents false
+         * next-buddy nomination below.
+         */
+        if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
+                return;
+
         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
                 set_next_buddy(pse);
                 next_buddy_marked = 1;
@@ -2356,6 +2365,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
         /*
          * We can come here with TIF_NEED_RESCHED already set from new task
          * wake up path.
+         *
+         * Note: this also catches the edge-case of curr being in a throttled
+         * group (e.g. via set_curr_task), since update_curr() (in the
+         * enqueue of curr) will have resulted in resched being set.  This
+         * prevents us from potentially nominating it as a false LAST_BUDDY
+         * below.
          */
         if (test_tsk_need_resched(curr))
                 return;
@@ -2474,7 +2489,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
 {
         struct sched_entity *se = &p->se;
 
-        if (!se->on_rq)
+        /* throttled hierarchies are not runnable */
+        if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
                 return false;
 
         /* Tell the scheduler that we'd really like pse to run next. */
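
The new guards all hinge on throttled_hierarchy() answering, with a single per-cfs_rq test, whether an entity sits anywhere beneath a throttled group. The toy program below is a self-contained sketch of that bookkeeping idea only: every toy_* name is invented for illustration, the structures are not the kernel's, and the real throttled_hierarchy()/throttle accounting in kernel/sched differs in detail.

/*
 * Toy model (not kernel code): each group carries a count of throttled
 * groups on its path to the root, so one integer test answers "is this
 * entity anywhere under a throttled group?".
 */
#include <stdio.h>

struct toy_cfs_rq {
        struct toy_cfs_rq *parent;
        int throttle_count;     /* throttled groups on the path to root */
};

/* Throttling 'victim' marks every group whose parent chain contains it. */
static void toy_throttle(struct toy_cfs_rq **groups, int nr,
                         struct toy_cfs_rq *victim)
{
        for (int i = 0; i < nr; i++) {
                for (struct toy_cfs_rq *q = groups[i]; q; q = q->parent) {
                        if (q == victim) {
                                groups[i]->throttle_count++;
                                break;
                        }
                }
        }
}

/* Analogue of the guard used in the hunks above. */
static int toy_throttled_hierarchy(struct toy_cfs_rq *cfs_rq)
{
        return cfs_rq->throttle_count > 0;
}

int main(void)
{
        struct toy_cfs_rq root = { .parent = NULL };
        struct toy_cfs_rq child = { .parent = &root };
        struct toy_cfs_rq grandchild = { .parent = &child };
        struct toy_cfs_rq *all[] = { &root, &child, &grandchild };

        toy_throttle(all, 3, &child);   /* throttle the middle group */

        printf("root: %d, child: %d, grandchild: %d\n",
               toy_throttled_hierarchy(&root),
               toy_throttled_hierarchy(&child),
               toy_throttled_hierarchy(&grandchild));
        return 0;
}

Running it prints "root: 0, child: 1, grandchild: 1": the throttled group and its descendant both report a throttled hierarchy while the root does not, which is the property the check_preempt_wakeup() and yield_to_task_fair() changes above rely on.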