@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
  * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
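Not part of the patch: a minimal userspace sketch of the "(1 + ilog(ncpus))" arithmetic named in the comment above, assuming ilog means ilog2() as in the kernel's log-scaled tunables; the ilog2() helper and the CPU count here are stand-ins for illustration.

#include <stdio.h>

/* userspace stand-in for the kernel's ilog2() */
static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int ncpus = 8;				/* example machine */
	unsigned int factor = 1 + ilog2(ncpus);		/* 1 + 3 = 4 */
	unsigned long gran = 5000000UL * factor;	/* scaled new default */

	/* 8 CPUs: 5 msec * 4 = 20 msec = 20000000 ns */
	printf("wakeup granularity: %lu ns\n", gran);
	return 0;
}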
@@ -813,17 +813,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	if (!cfs_rq->next)
-		return se;
+	struct rq *rq = rq_of(cfs_rq);
+	u64 pair_slice = rq->clock - cfs_rq->pair_start;
 
-	if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
+	if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+		cfs_rq->pair_start = rq->clock;
 		return se;
+	}
 
 	return cfs_rq->next;
 }
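To see what the new pick_next() gating does, here is a standalone sketch, not kernel code: the cached buddy in cfs_rq->next is honoured only until pair_slice exceeds one slice, so a wakeup pair cannot starve the leftmost entity indefinitely. The fake_cfs_rq struct and the fixed slice value are assumptions standing in for the kernel's cfs_rq and sched_slice().

#include <stdio.h>

typedef unsigned long long u64;

struct fake_cfs_rq {
	int next;		/* nonzero if a wakeup buddy is cached */
	u64 pair_start;		/* clock stamp of the last fallback pick */
};

static const char *pick_next(struct fake_cfs_rq *cfs_rq, u64 clock, u64 slice)
{
	u64 pair_slice = clock - cfs_rq->pair_start;

	if (!cfs_rq->next || pair_slice > slice) {
		cfs_rq->pair_start = clock;	/* restart the pair window */
		return "leftmost";
	}
	return "buddy";
}

int main(void)
{
	struct fake_cfs_rq rq = { .next = 1, .pair_start = 0 };
	u64 slice = 4;	/* pretend sched_slice() returns 4 ticks */
	u64 clock;

	/* The buddy wins only until its window exceeds one slice. */
	for (clock = 1; clock <= 10; clock++)
		printf("clock %llu -> %s\n", clock, pick_next(&rq, clock, slice));
	return 0;
}

Run as-is, this prints "buddy" for four ticks, then "leftmost" once as the window resets, then the pattern repeats, which mirrors how the patched pick_next() lets the pair run but caps the buddy's share.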