@@ -238,6 +238,7 @@ struct rq {
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
+	int cpu;		/* cpu of this runqueue */
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -267,6 +268,15 @@ struct rq {
 
 static DEFINE_PER_CPU(struct rq, runqueues);
 
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+	return rq->cpu;
+#else
+	return 0;
+#endif
+}
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -2211,7 +2221,8 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle, int *sd_idle)
+		   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
+		   cpumask_t *cpus)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2248,7 +2259,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		sum_weighted_load = sum_nr_running = avg_load = 0;
 
 		for_each_cpu_mask(i, group->cpumask) {
-			struct rq *rq = cpu_rq(i);
+			struct rq *rq;
+
+			if (!cpu_isset(i, *cpus))
+				continue;
+
+			rq = cpu_rq(i);
 
 			if (*sd_idle && !idle_cpu(i))
 				*sd_idle = 0;
@@ -2466,13 +2482,17 @@ ret:
  */
 static struct rq *
 find_busiest_queue(struct sched_group *group, enum idle_type idle,
-		   unsigned long imbalance)
+		   unsigned long imbalance, cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
 	int i;
 
 	for_each_cpu_mask(i, group->cpumask) {
+
+		if (!cpu_isset(i, *cpus))
+			continue;
+
 		rq = cpu_rq(i);
 
 		if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
@@ -2511,6 +2531,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
+	cpumask_t cpus = CPU_MASK_ALL;
 
 	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
 	    !sched_smt_power_savings)
@@ -2518,13 +2539,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 
 	schedstat_inc(sd, lb_cnt[idle]);
 
-	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle);
+redo:
+	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
+				   &cpus);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[idle]);
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, idle, imbalance);
+	busiest = find_busiest_queue(group, idle, imbalance, &cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -2549,8 +2572,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		double_rq_unlock(this_rq, busiest);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
-		if (unlikely(all_pinned))
+		if (unlikely(all_pinned)) {
+			cpu_clear(cpu_of(busiest), cpus);
+			if (!cpus_empty(cpus))
+				goto redo;
 			goto out_balanced;
+		}
 	}
 
 	if (!nr_moved) {
@@ -2639,18 +2666,22 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	cpumask_t cpus = CPU_MASK_ALL;
 
 	if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
 		sd_idle = 1;
 
 	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle);
+redo:
+	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
+				   &sd_idle, &cpus);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
+	busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+				     &cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
 		goto out_balanced;
@@ -2668,6 +2699,12 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 			minus_1_or_zero(busiest->nr_running),
 			imbalance, sd, NEWLY_IDLE, NULL);
 	spin_unlock(&busiest->lock);
+
+	if (!nr_moved) {
+		cpu_clear(cpu_of(busiest), cpus);
+		if (!cpus_empty(cpus))
+			goto redo;
+	}
 	}
 
 	if (!nr_moved) {
@@ -6747,6 +6784,7 @@ void __init sched_init(void)
 		rq->cpu_load[j] = 0;
 		rq->active_balance = 0;
 		rq->push_cpu = 0;
+		rq->cpu = i;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 #endif