@@ -2984,6 +2984,16 @@ next:
 	pulled++;
 	rem_load_move -= p->se.load.weight;
 
+#ifdef CONFIG_PREEMPT
+	/*
+	 * NEWIDLE balancing is a source of latency, so preemptible kernels
+	 * will stop after the first task is pulled to minimize the critical
+	 * section.
+	 */
+	if (idle == CPU_NEWLY_IDLE)
+		goto out;
+#endif
+
 	/*
 	 * We only want to steal up to the prescribed amount of weighted load.
 	 */
@@ -3030,9 +3040,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 				sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 
+#ifdef CONFIG_PREEMPT
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
-
+#endif
 	} while (class && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;