@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
 		rq = cpu_rq(i);
 
-		if (*sd_idle && !idle_cpu(i))
+		if (*sd_idle && rq->nr_running)
 			*sd_idle = 0;
 
 		/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		/*
 		 * First idle cpu or the first cpu(busiest) in this sched group
 		 * is eligible for doing load balancing at this and above
-		 * domains.
+		 * domains. In the newly idle case, we will allow all the cpu's
+		 * to do the newly idle load balance.
 		 */
-		if (local_group && balance_cpu != this_cpu && balance) {
+		if (idle != CPU_NEWLY_IDLE && local_group &&
+		    balance_cpu != this_cpu && balance) {
 			*balance = 0;
 			goto ret;
 		}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	unsigned long imbalance;
 	int nr_moved = 0;
 	int sd_idle = 0;
+	int all_pinned = 0;
 	cpumask_t cpus = CPU_MASK_ALL;
 
 	/*
@@ -2715,10 +2735,11 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, CPU_NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE,
+					&all_pinned);
 		spin_unlock(&busiest->lock);
 
-		if (!nr_moved) {
+		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), cpus);
 			if (!cpus_empty(cpus))
 				goto redo;