@@ -1581,6 +1581,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
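
The hunk above is where the behaviour lives: double_lock_balance() must take a second runqueue lock while this_rq->lock is already held, and it avoids ABBA deadlock by first attempting a trylock on busiest->lock and, if that fails, falling back to taking the two locks in a fixed global order (lowest pointer address first), dropping and re-acquiring this_rq->lock when the order would otherwise be violated. The return value tells the caller whether this_rq->lock was released in the process. Below is a minimal user-space sketch of that ordering idea, assuming POSIX mutexes; struct rq_like, double_lock() and double_unlock() are invented names for the example and none of this is kernel code. The lockdep annotations in the real function (spin_lock_nested() with SINGLE_DEPTH_NESTING, and lock_set_subclass() on unlock) only tell the lock validator that two locks of the same class are nested on purpose, and have no counterpart here.

/*
 * User-space sketch of the deadlock-avoidance pattern used by
 * double_lock_balance(): opportunistic trylock first, then fall back to a
 * fixed (address-based) lock order.  All names below are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct rq_like {
	pthread_mutex_t lock;
};

/*
 * Caller already holds this_rq->lock.  Returns 1 if this_rq->lock had to be
 * dropped and re-acquired (caller must revalidate any state it read under
 * the lock), 0 if busiest->lock was taken without releasing it.
 */
static int double_lock(struct rq_like *this_rq, struct rq_like *busiest)
{
	int ret = 0;

	if (pthread_mutex_trylock(&busiest->lock) != 0) {
		if (busiest < this_rq) {
			/* Wrong order: release, then take both in address order. */
			pthread_mutex_unlock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
			ret = 1;
		} else {
			/* Already in address order: a blocking lock is safe. */
			pthread_mutex_lock(&busiest->lock);
		}
	}
	return ret;
}

static void double_unlock(struct rq_like *this_rq, struct rq_like *busiest)
{
	(void)this_rq;		/* kept for symmetry with the kernel helper */
	pthread_mutex_unlock(&busiest->lock);
}

int main(void)
{
	struct rq_like a, b;

	pthread_mutex_init(&a.lock, NULL);
	pthread_mutex_init(&b.lock, NULL);

	pthread_mutex_lock(&a.lock);
	printf("this_rq->lock dropped: %d\n", double_lock(&a, &b));
	double_unlock(&a, &b);
	pthread_mutex_unlock(&a.lock);

	pthread_mutex_destroy(&a.lock);
	pthread_mutex_destroy(&b.lock);
	return 0;
}
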
@@ -2780,40 +2813,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
-/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
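
As a usage note, again a sketch under the same assumptions (it reuses the hypothetical struct rq_like, double_lock() and double_unlock() from the example after the first hunk and is not taken from this patch): a balancing path that already holds this_rq->lock has to treat a non-zero return as "the lock was dropped and re-taken", so any state it sampled beforehand may be stale and needs to be re-read.

/*
 * Hypothetical caller pattern, continuing the sketch above; nr_queued stands
 * in for state that is protected by this_rq->lock.
 */
static void balance_one(struct rq_like *this_rq, struct rq_like *busiest,
			int *nr_queued)
{
	pthread_mutex_lock(&this_rq->lock);

	int queued = *nr_queued;

	if (double_lock(this_rq, busiest)) {
		/* this_rq->lock was dropped: the earlier snapshot may be stale. */
		queued = *nr_queued;
	}

	if (queued > 0) {
		/* ... pull work from busiest over to this_rq here ... */
	}

	double_unlock(this_rq, busiest);
	pthread_mutex_unlock(&this_rq->lock);
}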