
sched: Optimize task_rq_lock()

Now that we hold the rq->lock over set_task_cpu() again, we can do
away with most of the TASK_WAKING checks and reduce them again to
set_cpus_allowed_ptr().

Removes some conditionals from scheduling hot-paths.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra, 15 years ago
Parent commit: 65cc8e4859
1 changed file with 15 additions and 8 deletions:
  1. kernel/sched.c (+15 -8)
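The pattern that remains in __task_rq_lock() / task_rq_lock() after this change is the classic lock-then-revalidate loop: take the lock of the runqueue the task currently appears on, then re-check that the task did not migrate before the lock was acquired. A minimal user-space sketch of that pattern (not kernel code; the two-runqueue setup, the struct layout and lock_task_rq() are illustrative assumptions):

/*
 * Sketch: lock-then-revalidate, as in __task_rq_lock() after this patch.
 * User-space analogue only -- the structs and helpers are made up.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; };
struct task { _Atomic(struct rq *) rq; };	/* which runqueue the task is on */

static struct rq rq0 = { PTHREAD_MUTEX_INITIALIZER };
static struct rq rq1 = { PTHREAD_MUTEX_INITIALIZER };

static struct rq *lock_task_rq(struct task *t)
{
	for (;;) {
		struct rq *rq = atomic_load(&t->rq);
		pthread_mutex_lock(&rq->lock);
		if (rq == atomic_load(&t->rq))	/* still on the same rq? */
			return rq;		/* locked and stable */
		pthread_mutex_unlock(&rq->lock);	/* lost a race; retry */
	}
}

int main(void)
{
	struct task t = { &rq0 };
	struct rq *rq = lock_task_rq(&t);

	printf("task locked on %s\n", rq == &rq0 ? "rq0" : "rq1");
	pthread_mutex_unlock(&rq->lock);

	atomic_store(&t.rq, &rq1);		/* simulate a migration */
	rq = lock_task_rq(&t);
	printf("task locked on %s\n", rq == &rq1 ? "rq1" : "rq0");
	pthread_mutex_unlock(&rq->lock);
	return 0;
}

The loop makes progress because a task's runqueue association only changes under the corresponding rq->lock, so once the re-check succeeds under the lock, the result stays stable until the caller unlocks.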

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -914,8 +914,8 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize against
- * ttwu() so that task_cpu() reports a stable number.
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+ * against ttwu().
  */
 static inline int task_is_waking(struct task_struct *p)
 {
@@ -932,11 +932,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 	}
@@ -953,12 +951,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	struct rq *rq;
 
 	for (;;) {
-		while (task_is_waking(p))
-			cpu_relax();
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_is_waking(p)))
+		if (likely(rq == task_rq(p)))
 			return rq;
 		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}
@@ -5262,7 +5258,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	struct rq *rq;
 	int ret = 0;
 
+	/*
+	 * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can
+	 * drop the rq->lock and still rely on ->cpus_allowed.
+	 */
+again:
+	while (task_is_waking(p))
+		cpu_relax();
 	rq = task_rq_lock(p, &flags);
+	if (task_is_waking(p)) {
+		task_rq_unlock(rq, &flags);
+		goto again;
+	}
 
 	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
 		ret = -EINVAL;
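With the checks gone from the lock primitives, the only place left that must care about TASK_WAKING is set_cpus_allowed_ptr() itself: spin until the task leaves the transient waking state, take the runqueue lock, and re-check, retrying if a wakeup raced in between. A user-space analogue of that prologue (illustrative only; lock_task_rq_not_waking() and the waking flag are assumed names, not kernel API):

/*
 * Sketch: the serialize-against-waking prologue that this patch adds to
 * set_cpus_allowed_ptr(). User-space analogue only -- names are made up.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct rq { pthread_mutex_t lock; };
struct task {
	_Atomic(struct rq *) rq;
	atomic_bool waking;		/* stands in for p->state == TASK_WAKING */
};

static struct rq *lock_task_rq(struct task *t)
{
	for (;;) {			/* same lock-then-revalidate loop as above */
		struct rq *rq = atomic_load(&t->rq);
		pthread_mutex_lock(&rq->lock);
		if (rq == atomic_load(&t->rq))
			return rq;
		pthread_mutex_unlock(&rq->lock);
	}
}

static struct rq *lock_task_rq_not_waking(struct task *t)
{
	struct rq *rq;
again:
	while (atomic_load(&t->waking))	/* wait for the wakeup to settle */
		sched_yield();		/* stands in for cpu_relax() */
	rq = lock_task_rq(t);
	if (atomic_load(&t->waking)) {	/* a wakeup raced in; try again */
		pthread_mutex_unlock(&rq->lock);
		goto again;
	}
	return rq;			/* lock held and the task is not waking */
}

int main(void)
{
	static struct rq rq0 = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &rq0, false };
	struct rq *rq = lock_task_rq_not_waking(&t);

	pthread_mutex_unlock(&rq->lock);
	return 0;
}

Note that the initial spin is only an optimization: the re-check after taking the lock is what actually closes the race, since the task can enter the waking state in the window between the spin and the lock acquisition.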