@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,10 +2804,10 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -7998,7 +7997,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
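
For reference, a minimal sketch (not part of the patch) of the locking pattern the hunks above rely on: with the per-runqueue rq_lock_key gone, all runqueue locks share one lock class, so two locks of the same class are taken in address order and the second acquisition is annotated with spin_lock_nested()/SINGLE_DEPTH_NESTING, telling lockdep this is a deliberate single-level nesting within the class rather than a potential AB-BA deadlock. The struct foo and double_foo_lock() names below are hypothetical.

/*
 * Minimal sketch, assuming kernel context; "struct foo" and
 * double_foo_lock() are hypothetical names, not part of the patch.
 * Locks of the same class are taken in address order to avoid a real
 * AB-BA deadlock; the inner acquisition uses SINGLE_DEPTH_NESTING so
 * lockdep accepts one level of nesting within the class.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/bug.h>

struct foo {
	spinlock_t lock;
};

static void double_foo_lock(struct foo *a, struct foo *b)
	__acquires(a->lock)
	__acquires(b->lock)
{
	BUG_ON(a == b);
	if (a < b) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

static void double_foo_unlock(struct foo *a, struct foo *b)
	__releases(a->lock)
	__releases(b->lock)
{
	spin_unlock(&a->lock);
	spin_unlock(&b->lock);
}

The address-order rule is what prevents the deadlock; the nesting annotation only exists to keep lockdep from flagging the second same-class acquire as a violation.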