@@ -73,13 +73,13 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
@@ -629,20 +629,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	s64 diff, gran;
-
 	if (!cfs_rq->next)
 		return se;
 
-	diff = cfs_rq->next->vruntime - se->vruntime;
-	if (diff < 0)
-		return se;
-
-	gran = calc_delta_fair(sysctl_sched_wakeup_granularity, &cfs_rq->load);
-	if (diff > gran)
+	if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
 		return se;
 
 	return cfs_rq->next;
@@ -1101,6 +1097,48 @@ out:
 }
 #endif /* CONFIG_SMP */
 
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+	unsigned long gran = sysctl_sched_wakeup_granularity;
+
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
+		gran = calc_delta_fair(gran, &se->load);
+
+	return gran;
+}
+
+/*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+	if (vdiff < 0)
+		return -1;
+
+	gran = wakeup_gran(curr);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
@@ -1110,7 +1148,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	unsigned long gran;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1140,15 +1177,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		pse = parent_entity(pse);
 	}
 
-	gran = sysctl_sched_wakeup_granularity;
-	/*
-	 * More easily preempt - nice tasks, while not making
-	 * it harder for + nice tasks.
-	 */
-	if (unlikely(se->load.weight > NICE_0_LOAD))
-		gran = calc_delta_fair(gran, &se->load);
-
-	if (pse->vruntime + gran < se->vruntime)
+	if (wakeup_preempt_entity(se, pse) == 1)
 		resched_task(curr);
 }
 
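For illustration only, and not part of the patch: a minimal user-space C sketch of the decision rule added above, reproducing the w(c, s1)/w(c, s2)/w(c, s3) table from the wakeup_preempt_entity() comment. The struct entity type, the sample vruntime values, and the gran * NICE_0_LOAD / weight scaling are simplified stand-ins for the kernel's sched_entity and calc_delta_fair() fixed-point math.

#include <stdio.h>

#define NICE_0_LOAD 1024UL	/* nice-0 load weight, as in the kernel */

struct entity {
	unsigned long weight;	/* load weight; NICE_0_LOAD for a nice-0 task */
	long long vruntime;	/* virtual runtime, nanoseconds */
};

static unsigned long sched_wakeup_granularity = 10000000UL;	/* 10 msec */

static unsigned long wakeup_gran(const struct entity *se)
{
	unsigned long gran = sched_wakeup_granularity;

	/*
	 * Heavier (negative nice) current tasks get a smaller window, so
	 * they are easier to preempt; this approximates calc_delta_fair().
	 */
	if (se->weight > NICE_0_LOAD)
		gran = gran * NICE_0_LOAD / se->weight;

	return gran;
}

static int wakeup_preempt_entity(const struct entity *curr,
				 const struct entity *se)
{
	long long gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff < 0)		/* se is right of curr: never preempt */
		return -1;

	gran = (long long)wakeup_gran(curr);
	if (vdiff > gran)	/* se trails curr by more than gran: preempt */
		return 1;

	return 0;		/* within the granularity window: leave curr */
}

int main(void)
{
	struct entity c  = { NICE_0_LOAD, 50000000LL };	/* current task */
	struct entity s1 = { NICE_0_LOAD, 70000000LL };	/* right of c */
	struct entity s2 = { NICE_0_LOAD, 45000000LL };	/* inside window */
	struct entity s3 = { NICE_0_LOAD, 30000000LL };	/* left of c - g */

	printf("w(c, s1) = %d\n", wakeup_preempt_entity(&c, &s1));	/* -1 */
	printf("w(c, s2) = %d\n", wakeup_preempt_entity(&c, &s2));	/*  0 */
	printf("w(c, s3) = %d\n", wakeup_preempt_entity(&c, &s3));	/*  1 */
	return 0;
}

Running the sketch prints -1, 0, 1, matching the ASCII diagram: a wakee with larger vruntime than curr never preempts, one within wakeup_gran() of curr ties, and one further left than the granularity window triggers preemption.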