@@ -664,6 +664,48 @@ static int effective_prio(task_t *p)
 	return prio;
 }
 
+/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+	int limit;
+
+	/*
+	 * Arrays were recently switched, all is well
+	 */
+	if (!rq->expired_timestamp)
+		return 0;
+
+	limit = STARVATION_LIMIT * rq->nr_running;
+
+	/*
+	 * It's time to switch arrays
+	 */
+	if (jiffies - rq->expired_timestamp >= limit)
+		return 1;
+
+	/*
+	 * There's a better selection in the expired array
+	 */
+	if (rq->curr->static_prio > rq->best_expired_prio)
+		return 1;
+
+	/*
+	 * All is well
+	 */
+	return 0;
+}
+
 /*
  * __activate_task - move a task to the runqueue.
  */
@@ -671,7 +713,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
-	if (batch_task(p))
+	if (unlikely(batch_task(p) || expired_starving(rq)))
 		target = rq->expired;
 	enqueue_task(p, target);
 	rq->nr_running++;
@@ -2489,22 +2531,6 @@ unsigned long long current_sched_time(const task_t *tsk)
 	return ns;
 }
 
-/*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
 
 		if (!rq->expired_timestamp)
 			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+		if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
 			enqueue_task(p, rq->expired);
 			if (p->static_prio < rq->best_expired_prio)
 				rq->best_expired_prio = p->static_prio;
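
To make the new policy easier to follow in isolation, here is a minimal userspace sketch of the same decision order. It is not part of the patch: demo_rq, the explicit "now" tick parameter and the STARVATION_LIMIT value of 128 are simplified stand-ins chosen purely for illustration (the kernel uses runqueue_t, jiffies and its own limit).

/*
 * Standalone illustration only -- not part of the patch above.
 * Types, the tick parameter and the limit value are placeholders.
 */
#include <stdio.h>

#define STARVATION_LIMIT 128	/* placeholder tick count, assumed for the demo */

struct demo_rq {
	unsigned long expired_timestamp;	/* tick when the first task expired, 0 = none */
	unsigned long nr_running;		/* number of runnable tasks */
	int curr_static_prio;			/* static_prio of the running task */
	int best_expired_prio;			/* best static_prio waiting in the expired array */
};

/*
 * Same decision order as the new expired_starving():
 * nothing expired -> 0, load-scaled deadline blown -> 1,
 * a better static_prio task is waiting -> 1, otherwise 0.
 */
static int demo_expired_starving(const struct demo_rq *rq, unsigned long now)
{
	unsigned long limit;

	if (!rq->expired_timestamp)
		return 0;

	limit = STARVATION_LIMIT * rq->nr_running;

	if (now - rq->expired_timestamp >= limit)
		return 1;

	if (rq->curr_static_prio > rq->best_expired_prio)
		return 1;

	return 0;
}

int main(void)
{
	struct demo_rq rq = {
		.expired_timestamp = 1000,
		.nr_running = 4,
		.curr_static_prio = 120,
		.best_expired_prio = 120,
	};
	unsigned long now;

	/* With four runnable tasks the deadline is 4 * STARVATION_LIMIT ticks. */
	for (now = 1000; now <= 1000 + 5 * STARVATION_LIMIT; now += STARVATION_LIMIT)
		printf("tick %lu: expired_starving=%d\n",
		       now, demo_expired_starving(&rq, now));

	return 0;
}

With four runnable tasks the deadline only trips after 4 * STARVATION_LIMIT ticks, which is the load-dependent timeout described in the comment; the priority check can fire earlier, as soon as a task with a better (numerically lower) static_prio is waiting in the expired array.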