@@ -970,6 +970,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	se->vruntime = vruntime;
 }
 
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -999,8 +1001,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_running == 1) {
 		list_add_leaf_cfs_rq(cfs_rq);
+		check_enqueue_throttle(cfs_rq);
+	}
 }
 
 static void __clear_buddies_last(struct sched_entity *se)
@@ -1202,6 +1206,8 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 	return se;
 }
 
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
 	/*
@@ -1211,6 +1217,9 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	if (prev->on_rq)
 		update_curr(cfs_rq);
 
+	/* throttle cfs_rqs exceeding runtime */
+	check_cfs_rq_runtime(cfs_rq);
+
 	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
@@ -1464,7 +1473,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	return 0;
 }
 
-static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1657,9 +1666,48 @@ out_unlock:
 
 	return idle;
 }
+
+/*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+ * runtime as update_curr() throttling can not trigger until it's on-rq.
+ */
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+{
+	/* an active group must be handled by the update_curr()->put() path */
+	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+		return;
+
+	/* ensure the group is not already throttled */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	/* update runtime allocation */
+	account_cfs_rq_runtime(cfs_rq, 0);
+	if (cfs_rq->runtime_remaining <= 0)
+		throttle_cfs_rq(cfs_rq);
+}
+
+/* conditionally throttle active cfs_rq's from put_prev_entity() */
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
+		return;
+
+	/*
+	 * it's possible for a throttled entity to be forced into a running
+	 * state (e.g. set_curr_task), in this case we're finished.
+	 */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	throttle_cfs_rq(cfs_rq);
+}
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		unsigned long delta_exec) {}
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
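
Taken together, the two hooks cover both ways a group can hold the CPU with exhausted quota: check_enqueue_throttle() catches a group that wakes up after its runtime has already expired, while check_cfs_rq_runtime() catches a group that runs through its quota and is being switched away from. The standalone sketch below models that decision logic outside the kernel; the toy_cfs_rq layout, the simplified accounting, and the main() driver are illustrative stand-ins, and only the function names and the order of the checks follow the patch.

/*
 * Standalone sketch of the two throttle trigger points above.
 * The struct layout and quota accounting are simplified stand-ins;
 * quota refill and the rq/hierarchy handling are elided.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	bool	runtime_enabled;	/* bandwidth control active? */
	bool	throttled;		/* already dequeued for overrun? */
	bool	curr;			/* currently running an entity? */
	long	runtime_remaining;	/* local quota slice, in ns */
};

static void throttle_cfs_rq(struct toy_cfs_rq *cfs_rq)
{
	cfs_rq->throttled = true;
	printf("throttled (runtime_remaining=%ld)\n",
	       cfs_rq->runtime_remaining);
}

/* charge delta_exec against the local quota (refill elided) */
static void account_cfs_rq_runtime(struct toy_cfs_rq *cfs_rq, long delta_exec)
{
	if (cfs_rq->runtime_enabled)
		cfs_rq->runtime_remaining -= delta_exec;
}

/* wakeup path: an idle group must not ride on stale quota */
static void check_enqueue_throttle(struct toy_cfs_rq *cfs_rq)
{
	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
		return;
	if (cfs_rq->throttled)
		return;
	account_cfs_rq_runtime(cfs_rq, 0);
	if (cfs_rq->runtime_remaining <= 0)
		throttle_cfs_rq(cfs_rq);
}

/* put_prev path: throttle a group that ran through its quota */
static void check_cfs_rq_runtime(struct toy_cfs_rq *cfs_rq)
{
	if (!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)
		return;
	if (cfs_rq->throttled)
		return;
	throttle_cfs_rq(cfs_rq);
}

int main(void)
{
	struct toy_cfs_rq rq = {
		.runtime_enabled = true, .runtime_remaining = 1000,
	};

	/* group runs for 1500ns, overrunning its 1000ns slice ... */
	rq.curr = true;
	account_cfs_rq_runtime(&rq, 1500);

	/* ... so it is throttled when it is switched away from */
	rq.curr = false;
	check_cfs_rq_runtime(&rq);

	/* a later wakeup with quota still exhausted is caught too */
	rq.throttled = false;	/* pretend it was re-enqueued */
	check_enqueue_throttle(&rq);
	return 0;
}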
|