@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
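
For reference, a minimal sketch of the pairing this change establishes, assuming the surrounding kernel/sched.c context; balance_example() below is hypothetical and not part of the patch. Any path that takes a second runqueue lock through double_lock_balance() should now release it through double_unlock_balance(), which drops busiest->lock and uses lock_set_subclass() to reset the lockdep subclass on this_rq->lock once only a single runqueue lock remains held.

/* Illustrative sketch only -- balance_example() is a hypothetical caller. */
static void balance_example(struct rq *this_rq, struct rq *busiest)
{
	/* Runqueue locks are taken with interrupts disabled. */
	spin_lock_irq(&this_rq->lock);

	/*
	 * May drop and re-take this_rq->lock to keep lock ordering while
	 * acquiring busiest->lock; when it does, this_rq->lock ends up
	 * annotated with a nested lockdep subclass.
	 */
	double_lock_balance(this_rq, busiest);

	/* ... pull tasks from busiest onto this_rq ... */

	/*
	 * Replaces a bare spin_unlock(&busiest->lock): it also resets
	 * this_rq->lock's lockdep subclass to 0 via lock_set_subclass(),
	 * matching the single-runqueue-lock state we return to.
	 */
	double_unlock_balance(this_rq, busiest);

	spin_unlock_irq(&this_rq->lock);
}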