@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
-	int i;
+	struct sched_group *sg;
+	int i, smt = 0;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	rcu_read_lock();
+again:
 	for_each_domain(target, sd) {
-		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-			break;
+		if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+			continue;
 
-		for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-			if (idle_cpu(i)) {
-				target = i;
-				break;
+		if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+			if (!smt) {
+				smt = 1;
+				goto again;
 			}
+			break;
 		}
 
-		/*
-		 * Lets stop looking for an idle sibling when we reached
-		 * the domain that spans the current cpu and prev_cpu.
-		 */
-		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-			break;
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+					tsk_cpus_allowed(p)))
+				goto next;
+
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+done:
 	rcu_read_unlock();
 
 	return target;
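
A note on what the hunk does: the rewritten loop makes two passes over the
wakee's scheduling domains. On the first pass (smt == 0) the
SD_SHARE_CPUPOWER (SMT) level is skipped, and a sched_group, i.e. a whole
core, is only accepted if every CPU in it is idle; if the walk climbs past
the last SD_SHARE_PKG_RESOURCES (shared-cache) level without a hit, it
restarts with smt = 1 and will then also settle for an idle SMT sibling.
The stand-alone model below only illustrates that ordering on a toy
topology (4 cores, 2 threads each, one shared cache); every identifier in
it is made up for the example, and affinity masks and the real
sched_domain walk are omitted.

#include <stdio.h>

#define NR_CORES	4
#define NR_THREADS	2

static int idle[NR_CORES][NR_THREADS];		/* 1 = that cpu is idle */

/* Pass 1 (smt == 0): only accept a core whose threads are all idle. */
static int find_idle_core(void)
{
	for (int c = 0; c < NR_CORES; c++) {
		int all_idle = 1;

		for (int t = 0; t < NR_THREADS; t++)
			all_idle &= idle[c][t];
		if (all_idle)
			return c * NR_THREADS;	/* first cpu of the core */
	}
	return -1;
}

/* Pass 2 (smt == 1): settle for any idle thread, SMT siblings included. */
static int find_idle_thread(void)
{
	for (int c = 0; c < NR_CORES; c++)
		for (int t = 0; t < NR_THREADS; t++)
			if (idle[c][t])
				return c * NR_THREADS + t;
	return -1;
}

int main(void)
{
	idle[0][1] = 1;			/* cpu 1: idle sibling of a busy core */
	idle[2][0] = idle[2][1] = 1;	/* cpus 4,5: a fully idle core */

	int target = find_idle_core();
	if (target < 0)
		target = find_idle_thread();

	/* Prints 4: the idle core wins over the busy core's idle sibling. */
	printf("selected cpu %d\n", target);
	return 0;
}

The reason for trying whole cores first is that an idle SMT sibling still
shares execution resources with the busy thread next to it, so a fully
idle core gives the woken task more capacity; the sibling is only the
fallback when no such core exists under the shared cache.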