@@ -35,8 +35,8 @@
  * (to see the precise effective timeslice length of your workload,
  * run vmstat and monitor the context-switches (cs) field)
  */
-unsigned int sysctl_sched_latency = 5000000ULL;
-unsigned int normalized_sysctl_sched_latency = 5000000ULL;
+unsigned int sysctl_sched_latency = 6000000ULL;
+unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
 /*
  * The initial- and re-scaling of tunables is configurable
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 2000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 5;
+static unsigned int sched_nr_latency = 3;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
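
The three tunables above move in lockstep: sched_nr_latency is meant to equal sysctl_sched_latency / sysctl_sched_min_granularity, which is why it drops from 5 (5 ms / 1 ms) to 3 (6 ms / 2 ms). The sketch below, simplified from the period calculation elsewhere in sched_fair.c and not part of this patch, shows how the values combine on a single CPU (before the 1 + ilog(ncpus) boot-time scaling):

/*
 * Sketch of the scheduling-period calculation that consumes the
 * tunables above (simplified; not part of this patch). With the new
 * defaults, up to 3 runnable tasks share a 6 ms period; beyond that
 * the period stretches so every task still gets the 2 ms minimum
 * granularity.
 */
static u64 sched_period_sketch(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;		/* 6 ms */

	if (unlikely(nr_running > sched_nr_latency))	/* more than 3 tasks */
		period = (u64)sysctl_sched_min_granularity * nr_running;

	return period;					/* nanoseconds */
}
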
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 {
 	unsigned long delta_exec_weighted;
 
-	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
+	schedstat_set(curr->statistics.exec_max,
+		      max((u64)delta_exec, curr->statistics.exec_max));
 
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
+	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	schedstat_set(se->wait_max, max(se->wait_max,
-			rq_of(cfs_rq)->clock - se->wait_start));
-	schedstat_set(se->wait_count, se->wait_count + 1);
-	schedstat_set(se->wait_sum, se->wait_sum +
-			rq_of(cfs_rq)->clock - se->wait_start);
+	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
+			rq_of(cfs_rq)->clock - se->statistics.wait_start));
+	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
+	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
+			rq_of(cfs_rq)->clock - se->statistics.wait_start);
 #ifdef CONFIG_SCHEDSTATS
 	if (entity_is_task(se)) {
 		trace_sched_stat_wait(task_of(se),
-			rq_of(cfs_rq)->clock - se->wait_start);
+			rq_of(cfs_rq)->clock - se->statistics.wait_start);
 	}
 #endif
-	schedstat_set(se->wait_start, 0);
+	schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
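
The se->foo to se->statistics.foo churn that runs through the rest of this patch comes from the schedstat fields being gathered into a dedicated structure embedded in struct sched_entity. The real definition lives in the shared scheduler headers; the sketch below only illustrates, roughly, the fields touched in these hunks:

/* Illustrative sketch only; the real layout is in the scheduler headers. */
struct sched_statistics_sketch {
	u64	wait_start, wait_max, wait_count, wait_sum;
	u64	sleep_start, sleep_max;
	u64	block_start, block_max;
	u64	sum_sleep_runtime;
	u64	exec_max, slice_max;
	u64	iowait_count, iowait_sum;
	/* ... nr_wakeups_*, nr_failed_migrations_*, ... */
};

struct sched_entity_sketch {
	/* ... load, run_node, vruntime, ... */
#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics_sketch statistics;
#endif
};
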
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (entity_is_task(se))
 		tsk = task_of(se);
 
-	if (se->sleep_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
+	if (se->statistics.sleep_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->sleep_max))
-			se->sleep_max = delta;
+		if (unlikely(delta > se->statistics.sleep_max))
+			se->statistics.sleep_max = delta;
 
-		se->sleep_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.sleep_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
-	if (se->block_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
+	if (se->statistics.block_start) {
+		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->block_max))
-			se->block_max = delta;
+		if (unlikely(delta > se->statistics.block_max))
+			se->statistics.block_max = delta;
 
-		se->block_start = 0;
-		se->sum_sleep_runtime += delta;
+		se->statistics.block_start = 0;
+		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->iowait_sum += delta;
-				se->iowait_count++;
+				se->statistics.iowait_sum += delta;
+				se->statistics.iowait_count++;
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -737,19 +738,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		vruntime += sched_vslice(cfs_rq, se);
 
 	/* sleeps up to a single latency don't count. */
-	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+	if (!initial) {
 		unsigned long thresh = sysctl_sched_latency;
 
-		/*
-		 * Convert the sleeper threshold into virtual time.
-		 * SCHED_IDLE is a special sub-class. We care about
-		 * fairness only relative to other SCHED_IDLE tasks,
-		 * all of which have the same weight.
-		 */
-		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
-				 task_of(se)->policy != SCHED_IDLE))
-			thresh = calc_delta_fair(thresh, se);
-
 		/*
 		 * Halve their sleep time's effect, to allow
 		 * for a gentler effect of sleepers:
@@ -766,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP	1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -776,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update the normalized vruntime before updating min_vruntime
 	 * through callig update_curr().
 	 */
-	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
 		se->vruntime += cfs_rq->min_vruntime;
 
 	/*
@@ -812,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
 	/*
 	 * Update run-time statistics of the 'current'.
@@ -820,15 +808,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	update_curr(cfs_rq);
 
 	update_stats_dequeue(cfs_rq, se);
-	if (sleep) {
+	if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
 
 			if (tsk->state & TASK_INTERRUPTIBLE)
-				se->sleep_start = rq_of(cfs_rq)->clock;
+				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
-				se->block_start = rq_of(cfs_rq)->clock;
+				se->statistics.block_start = rq_of(cfs_rq)->clock;
 		}
 #endif
 	}
@@ -845,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	 * update can refer to the ->curr item and we need to reflect this
 	 * movement in our normalized position.
 	 */
-	if (!sleep)
+	if (!(flags & DEQUEUE_SLEEP))
 		se->vruntime -= cfs_rq->min_vruntime;
 }
 
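
With the hunks above, the local ENQUEUE_* defines and the old 'int sleep' argument give way to a single flags word shared between the core scheduler and the scheduling classes. The definitions live in the common scheduler header; only the flags referenced in this patch are sketched here, with illustrative values:

/*
 * Sketch of the shared enqueue/dequeue flags that replace the local
 * ENQUEUE_* defines and the 'int sleep' argument. The real definitions
 * (and any additional flags) live in the common scheduler header.
 */
#define ENQUEUE_WAKEUP	1	/* the task is waking up */
#define ENQUEUE_WAKING	2	/* vruntime still normalized (TASK_WAKING) */

#define DEQUEUE_SLEEP	1	/* the task is going to sleep */
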
@@ -912,7 +900,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
-		se->slice_max = max(se->slice_max,
+		se->statistics.slice_max = max(se->statistics.slice_max,
 			se->sum_exec_runtime - se->prev_sum_exec_runtime);
 	}
 #endif
@@ -1054,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	int flags = 0;
-
-	if (wakeup)
-		flags |= ENQUEUE_WAKEUP;
-	if (p->state == TASK_WAKING)
-		flags |= ENQUEUE_MIGRATE;
 
 	for_each_sched_entity(se) {
 		if (se->on_rq)
@@ -1081,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		dequeue_entity(cfs_rq, se, sleep);
+		dequeue_entity(cfs_rq, se, flags);
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight)
 			break;
-		sleep = 1;
+		flags |= DEQUEUE_SLEEP;
 	}
 
 	hrtick_update(rq);
@@ -1240,7 +1222,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	struct task_struct *curr = current;
 	unsigned long this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
@@ -1255,18 +1236,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	load	  = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	if (sync) {
-		if (sched_feat(SYNC_LESS) &&
-		    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-		     p->se.avg_overlap > sysctl_sched_migration_cost))
-			sync = 0;
-	} else {
-		if (sched_feat(SYNC_MORE) &&
-		    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-		     p->se.avg_overlap < sysctl_sched_migration_cost))
-			sync = 1;
-	}
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1306,7 +1275,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (sync && balanced)
 		return 1;
 
-	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 	if (balanced ||
@@ -1318,7 +1287,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		 * there is no bad imbalance.
 		 */
 		schedstat_inc(sd, ttwu_move_affine);
-		schedstat_inc(p, se.nr_wakeups_affine);
+		schedstat_inc(p, se.statistics.nr_wakeups_affine);
 
 		return 1;
 	}
@@ -1406,29 +1375,48 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int
-select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
+static int select_idle_sibling(struct task_struct *p, int target)
 {
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
+	struct sched_domain *sd;
 	int i;
 
 	/*
-	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
-	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
-	 * always a better target than the current cpu.
+	 * If the task is going to be woken-up on this cpu and if it is
+	 * already idle, then it is the right target.
 	 */
-	if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
+	if (target == cpu && idle_cpu(cpu))
+		return cpu;
+
+	/*
+	 * If the task is going to be woken-up on the cpu where it previously
+	 * ran and if it is currently idle, then it the right target.
+	 */
+	if (target == prev_cpu && idle_cpu(prev_cpu))
 		return prev_cpu;
 
 	/*
-	 * Otherwise, iterate the domain and find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
-	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
-		if (!cpu_rq(i)->cfs.nr_running) {
-			target = i;
+	for_each_domain(target, sd) {
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
 			break;
+
+		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+			if (idle_cpu(i)) {
+				target = i;
+				break;
+			}
 		}
+
+		/*
+		 * Lets stop looking for an idle sibling when we reached
+		 * the domain that spans the current cpu and prev_cpu.
+		 */
+		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+			break;
 	}
 
 	return target;
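
select_idle_sibling() now asks idle_cpu() instead of peeking at cfs.nr_running, so a CPU that is busy with a real-time or other non-CFS task no longer looks idle. For reference, idle_cpu() is a core-scheduler helper; a minimal sketch of the test it performs:

/*
 * Minimal sketch of the idle_cpu() test used above (the real helper
 * lives in the core scheduler): a CPU counts as idle when the task
 * currently running on its runqueue is that runqueue's idle thread.
 */
static int idle_cpu_sketch(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return rq->curr == rq->idle;
}
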
@@ -1445,7 +1433,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1456,8 +1445,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (sched_feat(AFFINE_WAKEUPS) &&
-		    cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
@@ -1491,34 +1479,13 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 		}
 
 		/*
-		 * While iterating the domains looking for a spanning
-		 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
-		 * in cache sharing domains along the way.
+		 * If both cpu and prev_cpu are part of this domain,
+		 * cpu is a valid SD_WAKE_AFFINE target.
 		 */
-		if (want_affine) {
-			int target = -1;
-
-			/*
-			 * If both cpu and prev_cpu are part of this domain,
-			 * cpu is a valid SD_WAKE_AFFINE target.
-			 */
-			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
-				target = cpu;
-
-			/*
-			 * If there's an idle sibling in this domain, make that
-			 * the wake_affine target instead of the current cpu.
-			 */
-			if (tmp->flags & SD_SHARE_PKG_RESOURCES)
-				target = select_idle_sibling(p, tmp, target);
-
-			if (target >= 0) {
-				if (tmp->flags & SD_WAKE_AFFINE) {
-					affine_sd = tmp;
-					want_affine = 0;
-				}
-				cpu = target;
-			}
+		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+			affine_sd = tmp;
+			want_affine = 0;
 		}
 
 		if (!want_sd && !want_affine)
@@ -1531,22 +1498,29 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 			sd = tmp;
 	}
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	if (sched_feat(LB_SHARES_UPDATE)) {
 		/*
 		 * Pick the largest domain to update shares over
 		 */
 		tmp = sd;
-		if (affine_sd && (!tmp ||
-				  cpumask_weight(sched_domain_span(affine_sd)) >
-				  cpumask_weight(sched_domain_span(sd))))
+		if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
 			tmp = affine_sd;
 
-		if (tmp)
+		if (tmp) {
+			raw_spin_unlock(&rq->lock);
 			update_shares(tmp);
+			raw_spin_lock(&rq->lock);
+		}
 	}
+#endif
 
-	if (affine_sd && wake_affine(affine_sd, p, sync))
-		return cpu;
+	if (affine_sd) {
+		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+			return select_idle_sibling(p, cpu);
+		else
+			return select_idle_sibling(p, prev_cpu);
+	}
 
 	while (sd) {
 		int load_idx = sd->forkexec_idx;
@@ -1576,10 +1550,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 
 		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
-		weight = cpumask_weight(sched_domain_span(sd));
+		weight = sd->span_weight;
 		sd = NULL;
 		for_each_domain(cpu, tmp) {
-			if (weight <= cpumask_weight(sched_domain_span(tmp)))
+			if (weight <= tmp->span_weight)
 				break;
 			if (tmp->flags & sd_flag)
 				sd = tmp;
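
The cpumask_weight(sched_domain_span(sd)) calls in this path (and in the cpu-power code further down) are replaced by sd->span_weight, a cached weight that is assumed to be filled in once when the sched domains are built. An illustrative sketch of that precomputation, not taken from this patch:

/*
 * Illustrative sketch of how sd->span_weight is assumed to be
 * populated during sched-domain construction, so the hot wakeup and
 * balance paths can read a cached value instead of recounting the
 * cpumask every time.
 */
static void init_span_weight_sketch(struct sched_domain *sd)
{
	for (; sd; sd = sd->parent)
		sd->span_weight = cpumask_weight(sched_domain_span(sd));
}
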
@@ -1591,63 +1565,26 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- *       degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-	u64 gran = 0;
-
-	if (this_run < expected_wakeup)
-		gran = expected_wakeup - this_run;
-
-	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
 static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
 
-	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-		gran = adaptive_gran(curr, se);
-
 	/*
 	 * Since its curr running now, convert the gran from real-time
 	 * to virtual-time in his units.
+	 *
+	 * By using 'se' instead of 'curr' we penalize light tasks, so
+	 * they get preempted easier. That is, if 'se' < 'curr' then
+	 * the resulting gran will be larger, therefore penalizing the
+	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
+	 * be smaller, again penalizing the lighter task.
+	 *
+	 * This is especially important for buddies when the leftmost
+	 * task is higher priority than the buddy.
 	 */
-	if (sched_feat(ASYM_GRAN)) {
-		/*
-		 * By using 'se' instead of 'curr' we penalize light tasks, so
-		 * they get preempted easier. That is, if 'se' < 'curr' then
-		 * the resulting gran will be larger, therefore penalizing the
-		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
-		 * be smaller, again penalizing the lighter task.
-		 *
-		 * This is especially important for buddies when the leftmost
-		 * task is higher priority than the buddy.
-		 */
-		if (unlikely(se->load.weight != NICE_0_LOAD))
-			gran = calc_delta_fair(gran, se);
-	} else {
-		if (unlikely(curr->load.weight != NICE_0_LOAD))
-			gran = calc_delta_fair(gran, curr);
-	}
+	if (unlikely(se->load.weight != NICE_0_LOAD))
+		gran = calc_delta_fair(gran, se);
 
 	return gran;
 }
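
With the ASYM_GRAN/ADAPTIVE_GRAN branches gone, wakeup_gran() always scales the granularity by the wakee's weight via calc_delta_fair(), which effectively computes gran * NICE_0_LOAD / se->load.weight. A worked example with illustrative numbers (nice 0 weight 1024, nice +5 weight roughly 335, 1 ms default granularity) shows the reniced wakee ending up with roughly a 3x larger granularity, so it preempts less eagerly:

/*
 * Worked example (illustrative numbers only) of the unconditional
 * calc_delta_fair() scaling above, for a nice 0 wakee versus a
 * nice +5 wakee, assuming weights of 1024 and 335 and a 1 ms
 * wakeup granularity.
 */
static unsigned long wakeup_gran_example(unsigned long se_weight)
{
	unsigned long gran = 1000000UL;		/* 1 ms in ns */
	unsigned long nice_0_load = 1024;

	/* mirrors gran = calc_delta_fair(gran, se) for weight != NICE_0_LOAD */
	if (se_weight != nice_0_load)
		gran = (unsigned long)((u64)gran * nice_0_load / se_weight);

	return gran;	/* ~1000000 for weight 1024, ~3056716 for weight 335 */
}
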
@@ -1705,7 +1642,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
 	if (unlikely(rt_prio(p->prio)))
@@ -1738,14 +1674,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(curr->policy == SCHED_IDLE))
 		goto preempt;
 
-	if (sched_feat(WAKEUP_SYNC) && sync)
-		goto preempt;
-
-	if (sched_feat(WAKEUP_OVERLAP) &&
-			se->avg_overlap < sysctl_sched_migration_cost &&
-			pse->avg_overlap < sysctl_sched_migration_cost)
-		goto preempt;
-
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
@@ -1844,13 +1772,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 3) are cache-hot on their current CPU.
 	 */
 	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
-		schedstat_inc(p, se.nr_failed_migrations_affine);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 		return 0;
 	}
 	*all_pinned = 0;
 
 	if (task_running(rq, p)) {
-		schedstat_inc(p, se.nr_failed_migrations_running);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
 		return 0;
 	}
 
@@ -1866,14 +1794,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 #ifdef CONFIG_SCHEDSTATS
 		if (tsk_cache_hot) {
 			schedstat_inc(sd, lb_hot_gained[idle]);
-			schedstat_inc(p, se.nr_forced_migrations);
+			schedstat_inc(p, se.statistics.nr_forced_migrations);
 		}
 #endif
 		return 1;
 	}
 
 	if (tsk_cache_hot) {
-		schedstat_inc(p, se.nr_failed_migrations_hot);
+		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
 		return 0;
 	}
 	return 1;
@@ -2311,7 +2239,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
 	unsigned long smt_gain = sd->smt_gain;
 
 	smt_gain /= weight;
@@ -2344,7 +2272,7 @@ unsigned long scale_rt_power(int cpu)
 
 static void update_cpu_power(struct sched_domain *sd, int cpu)
 {
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
@@ -2870,6 +2798,8 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
+static int active_load_balance_cpu_stop(void *data);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2959,8 +2889,9 @@ redo:
 	if (need_active_balance(sd, sd_idle, idle)) {
 		raw_spin_lock_irqsave(&busiest->lock, flags);
 
-		/* don't kick the migration_thread, if the curr
-		 * task on busiest cpu can't be moved to this_cpu
+		/* don't kick the active_load_balance_cpu_stop,
+		 * if the curr task on busiest cpu can't be
+		 * moved to this_cpu
 		 */
 		if (!cpumask_test_cpu(this_cpu,
 				      &busiest->curr->cpus_allowed)) {
@@ -2970,14 +2901,22 @@ redo:
 			goto out_one_pinned;
 		}
 
+		/*
+		 * ->active_balance synchronizes accesses to
+		 * ->active_balance_work. Once set, it's cleared
+		 * only after active load balance is finished.
+		 */
 		if (!busiest->active_balance) {
 			busiest->active_balance = 1;
 			busiest->push_cpu = this_cpu;
 			active_balance = 1;
 		}
 		raw_spin_unlock_irqrestore(&busiest->lock, flags);
+
 		if (active_balance)
-			wake_up_process(busiest->migration_thread);
+			stop_one_cpu_nowait(cpu_of(busiest),
+				active_load_balance_cpu_stop, busiest,
+				&busiest->active_balance_work);
 
 		/*
 		 * We've kicked active balancing, reset the failure
@@ -3084,24 +3023,29 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 }
 
 /*
- * active_load_balance is run by migration threads. It pushes running tasks
- * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
- * running on each physical CPU where possible, and avoids physical /
- * logical imbalances.
- *
- * Called with busiest_rq locked.
+ * active_load_balance_cpu_stop is run by cpu stopper. It pushes
+ * running tasks off the busiest CPU onto idle CPUs. It requires at
+ * least 1 task to be running on each physical CPU where possible, and
+ * avoids physical / logical imbalances.
  */
-static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
+static int active_load_balance_cpu_stop(void *data)
 {
+	struct rq *busiest_rq = data;
+	int busiest_cpu = cpu_of(busiest_rq);
 	int target_cpu = busiest_rq->push_cpu;
+	struct rq *target_rq = cpu_rq(target_cpu);
 	struct sched_domain *sd;
-	struct rq *target_rq;
+
+	raw_spin_lock_irq(&busiest_rq->lock);
+
+	/* make sure the requested cpu hasn't gone down in the meantime */
+	if (unlikely(busiest_cpu != smp_processor_id() ||
+		     !busiest_rq->active_balance))
+		goto out_unlock;
 
 	/* Is there any task to move? */
 	if (busiest_rq->nr_running <= 1)
-		return;
-
-	target_rq = cpu_rq(target_cpu);
+		goto out_unlock;
 
 	/*
 	 * This condition is "impossible", if it occurs
@@ -3112,8 +3056,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 
 	/* move a task from busiest_rq to target_rq */
 	double_lock_balance(busiest_rq, target_rq);
-	update_rq_clock(busiest_rq);
-	update_rq_clock(target_rq);
 
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
@@ -3132,6 +3074,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		schedstat_inc(sd, alb_failed);
 	}
 	double_unlock_balance(busiest_rq, target_rq);
+out_unlock:
+	busiest_rq->active_balance = 0;
+	raw_spin_unlock_irq(&busiest_rq->lock);
+	return 0;
 }
 
 #ifdef CONFIG_NO_HZ
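
The last group of hunks retires the per-CPU migration thread for active balancing in favour of the generic cpu-stop machinery: stop_one_cpu_nowait() queues active_load_balance_cpu_stop() to run on the busiest CPU and returns immediately, with ->active_balance guarding the cpu_stop_work embedded in the runqueue. A hedged sketch of that call pattern, using illustrative names:

/*
 * Sketch of the cpu-stop call pattern used above (illustrative names;
 * the real API is the stop_machine / cpu_stop interface). The callback
 * runs on the chosen CPU in its dedicated stopper context, so it can
 * push a task away without racing the local scheduler.
 */
static int example_stop_callback(void *data)
{
	struct rq *busiest = data;

	/* ... push one task off 'busiest', as active_load_balance_cpu_stop does ... */
	return 0;
}

static void kick_active_balance_sketch(struct rq *busiest,
				       struct cpu_stop_work *work)
{
	/* queue the callback and return immediately; no kthread wakeup needed */
	stop_one_cpu_nowait(cpu_of(busiest), example_stop_callback, busiest, work);
}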