|
@@ -2816,6 +2816,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
|
|
|
/* Working cpumask for load_balance and load_balance_newidle. */
|
|
|
static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
|
|
|
|
|
|
+static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
|
|
|
+{
|
|
|
+ if (idle == CPU_NEWLY_IDLE) {
|
|
|
+ /*
|
|
|
+ * The only task running in a non-idle cpu can be moved to this
|
|
|
+	 * cpu in an attempt to completely free up the other CPU
|
|
|
+ * package.
|
|
|
+ *
|
|
|
+ * The package power saving logic comes from
|
|
|
+	 * find_busiest_group(). If there is no imbalance, then
|
|
|
+ * f_b_g() will return NULL. However when sched_mc={1,2} then
|
|
|
+ * f_b_g() will select a group from which a running task may be
|
|
|
+ * pulled to this cpu in order to make the other package idle.
|
|
|
+ * If there is no opportunity to make a package idle and if
|
|
|
+	 * there is no imbalance, then f_b_g() will return NULL and no
|
|
|
+ * action will be taken in load_balance_newidle().
|
|
|
+ *
|
|
|
+ * Under normal task pull operation due to imbalance, there
|
|
|
+ * will be more than one task in the source run queue and
|
|
|
+ * move_tasks() will succeed. ld_moved will be true and this
|
|
|
+ * active balance code will not be triggered.
|
|
|
+ */
|
|
|
+ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
|
|
|
+ !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Check this_cpu to ensure it is balanced within domain. Attempt to move
|
|
|
* tasks if there is an imbalance.
|
|
@@ -2902,8 +2935,7 @@ redo:
|
|
|
schedstat_inc(sd, lb_failed[idle]);
|
|
|
sd->nr_balance_failed++;
|
|
|
|
|
|
- if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
|
|
|
-
|
|
|
+ if (need_active_balance(sd, sd_idle, idle)) {
|
|
|
raw_spin_lock_irqsave(&busiest->lock, flags);
|
|
|
|
|
|
/* don't kick the migration_thread, if the curr
|
|
@@ -3049,66 +3081,37 @@ redo:
|
|
|
int active_balance = 0;
|
|
|
|
|
|
schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
|
|
|
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
|
|
|
- !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
|
|
|
- return -1;
|
|
|
-
|
|
|
- if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
|
|
|
- return -1;
|
|
|
+ sd->nr_balance_failed++;
|
|
|
|
|
|
- if (sd->nr_balance_failed++ < 2)
|
|
|
- return -1;
|
|
|
+ if (need_active_balance(sd, sd_idle, CPU_NEWLY_IDLE)) {
|
|
|
+ double_lock_balance(this_rq, busiest);
|
|
|
|
|
|
- /*
|
|
|
- * The only task running in a non-idle cpu can be moved to this
|
|
|
- * cpu in an attempt to completely freeup the other CPU
|
|
|
- * package. The same method used to move task in load_balance()
|
|
|
- * have been extended for load_balance_newidle() to speedup
|
|
|
- * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
|
|
|
- *
|
|
|
- * The package power saving logic comes from
|
|
|
- * find_busiest_group(). If there are no imbalance, then
|
|
|
- * f_b_g() will return NULL. However when sched_mc={1,2} then
|
|
|
- * f_b_g() will select a group from which a running task may be
|
|
|
- * pulled to this cpu in order to make the other package idle.
|
|
|
- * If there is no opportunity to make a package idle and if
|
|
|
- * there are no imbalance, then f_b_g() will return NULL and no
|
|
|
- * action will be taken in load_balance_newidle().
|
|
|
- *
|
|
|
- * Under normal task pull operation due to imbalance, there
|
|
|
- * will be more than one task in the source run queue and
|
|
|
- * move_tasks() will succeed. ld_moved will be true and this
|
|
|
- * active balance code will not be triggered.
|
|
|
- */
|
|
|
+ /*
|
|
|
+ * don't kick the migration_thread, if the curr
|
|
|
+ * task on busiest cpu can't be moved to this_cpu
|
|
|
+ */
|
|
|
+ if (!cpumask_test_cpu(this_cpu,
|
|
|
+ &busiest->curr->cpus_allowed)) {
|
|
|
+ double_unlock_balance(this_rq, busiest);
|
|
|
+ all_pinned = 1;
|
|
|
+ return ld_moved;
|
|
|
+ }
|
|
|
|
|
|
- /* Lock busiest in correct order while this_rq is held */
|
|
|
- double_lock_balance(this_rq, busiest);
|
|
|
+ if (!busiest->active_balance) {
|
|
|
+ busiest->active_balance = 1;
|
|
|
+ busiest->push_cpu = this_cpu;
|
|
|
+ active_balance = 1;
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * don't kick the migration_thread, if the curr
|
|
|
- * task on busiest cpu can't be moved to this_cpu
|
|
|
- */
|
|
|
- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
|
|
|
double_unlock_balance(this_rq, busiest);
|
|
|
- all_pinned = 1;
|
|
|
- return ld_moved;
|
|
|
- }
|
|
|
-
|
|
|
- if (!busiest->active_balance) {
|
|
|
- busiest->active_balance = 1;
|
|
|
- busiest->push_cpu = this_cpu;
|
|
|
- active_balance = 1;
|
|
|
+ /*
|
|
|
+ * Should not call ttwu while holding a rq->lock
|
|
|
+ */
|
|
|
+ raw_spin_unlock(&this_rq->lock);
|
|
|
+ if (active_balance)
|
|
|
+ wake_up_process(busiest->migration_thread);
|
|
|
+ raw_spin_lock(&this_rq->lock);
|
|
|
}
|
|
|
-
|
|
|
- double_unlock_balance(this_rq, busiest);
|
|
|
- /*
|
|
|
- * Should not call ttwu while holding a rq->lock
|
|
|
- */
|
|
|
- raw_spin_unlock(&this_rq->lock);
|
|
|
- if (active_balance)
|
|
|
- wake_up_process(busiest->migration_thread);
|
|
|
- raw_spin_lock(&this_rq->lock);
|
|
|
-
|
|
|
} else
|
|
|
sd->nr_balance_failed = 0;
|
|
|
|