@@ -1952,6 +1952,7 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	__acquires(rq1->lock)
 	__acquires(rq2->lock)
 {
+	BUG_ON(!irqs_disabled());
 	if (rq1 == rq2) {
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
@@ -1991,6 +1992,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
@@ -5067,7 +5073,10 @@ wait_to_die:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/* Figure out where task on dead CPU should go, use force if neccessary. */
+/*
+ * Figure out where task on dead CPU should go, use force if necessary.
+ * NOTE: interrupts should be disabled by the caller
+ */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	unsigned long flags;
@@ -5187,6 +5196,7 @@ void idle_task_exit(void)
 	mmdrop(mm);
 }
 
+/* called under rq->lock with disabled interrupts */
static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 {
 	struct rq *rq = cpu_rq(dead_cpu);
@@ -5203,10 +5213,11 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	 * Drop lock around migration; if someone else moves it,
 	 * that's OK. No task can be added to this CPU, so iteration is
 	 * fine.
+	 * NOTE: interrupts should be left disabled  --dev@
 	 */
-	spin_unlock_irq(&rq->lock);
+	spin_unlock(&rq->lock);
 	move_task_off_dead_cpu(dead_cpu, p);
-	spin_lock_irq(&rq->lock);
+	spin_lock(&rq->lock);
 
 	put_task_struct(p);
 }