@@ -2546,8 +2546,6 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
- *
- * Called with this_rq unlocked.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum idle_type idle)
@@ -2557,6 +2555,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long imbalance;
 	struct rq *busiest;
 	cpumask_t cpus = CPU_MASK_ALL;
+	unsigned long flags;
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -2596,11 +2595,13 @@ redo:
 		 * still unbalanced. nr_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.
 		 */
+		local_irq_save(flags);
 		double_rq_lock(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
				      minus_1_or_zero(busiest->nr_running),
				      imbalance, sd, idle, &all_pinned);
 		double_rq_unlock(this_rq, busiest);
+		local_irq_restore(flags);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
@@ -2617,13 +2618,13 @@ redo:
 
 	if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-		spin_lock(&busiest->lock);
+		spin_lock_irqsave(&busiest->lock, flags);
 
 		/* don't kick the migration_thread, if the curr
 		 * task on busiest cpu can't be moved to this_cpu
 		 */
 		if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
-			spin_unlock(&busiest->lock);
+			spin_unlock_irqrestore(&busiest->lock, flags);
 			all_pinned = 1;
 			goto out_one_pinned;
 		}
@@ -2633,7 +2634,7 @@ redo:
 		busiest->push_cpu = this_cpu;
 		active_balance = 1;
 	}
-	spin_unlock(&busiest->lock);
+	spin_unlock_irqrestore(&busiest->lock, flags);
 	if (active_balance)
 		wake_up_process(busiest->migration_thread);
 
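
Background on the locking pattern touched above: load_balance() can no longer assume its caller has interrupts disabled, so the runqueue locks are now taken with the irq-saving variants, and double_rq_lock() continues to acquire the two runqueue locks in a fixed order so that two CPUs balancing against each other cannot deadlock. Below is a minimal userspace sketch of that ordering idea only, assuming illustrative names (toy_rq, toy_double_rq_lock); it uses pthread mutexes in place of the runqueue spinlocks and cannot model local_irq_save(), which the comments point out.

/*
 * Userspace sketch (not kernel code) of the double-lock ordering behind
 * double_rq_lock(): when two "runqueue" locks must be held at once, take
 * them lowest address first so an AB-BA deadlock is impossible.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_rq {
	pthread_mutex_t lock;
	int nr_running;
};

/* Lock both runqueues in address order; a single lock if they are the same. */
static void toy_double_rq_lock(struct toy_rq *a, struct toy_rq *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);
	} else if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void toy_double_rq_unlock(struct toy_rq *a, struct toy_rq *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct toy_rq rq0 = { PTHREAD_MUTEX_INITIALIZER, 3 };
	struct toy_rq rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

	/* In the kernel, local_irq_save(flags) would precede this, since the
	 * caller no longer guarantees interrupts are already disabled. */
	toy_double_rq_lock(&rq0, &rq1);
	printf("nr_running: %d %d\n", rq0.nr_running, rq1.nr_running);
	toy_double_rq_unlock(&rq0, &rq1);
	/* ...and local_irq_restore(flags) would follow the unlock. */
	return 0;
}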