@@ -2833,20 +2833,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	spin_unlock(&target_rq->lock);
 }
 
-/*
- * rebalance_tick will get called every timer tick, on every CPU.
- *
- * It checks each scheduling domain to see if it is due to be balanced,
- * and initiates a balancing operation if so.
- *
- * Balancing parameters are set up in arch_init_sched_domains.
- */
-
-static void
-rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
+static void update_load(struct rq *this_rq)
 {
-	unsigned long this_load, interval;
-	struct sched_domain *sd;
+	unsigned long this_load;
 	int i, scale;
 
 	this_load = this_rq->raw_weighted_load;
@@ -2866,6 +2855,22 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 		new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
 	}
+}
+
+/*
+ * rebalance_tick will get called every timer tick, on every CPU.
+ *
+ * It checks each scheduling domain to see if it is due to be balanced,
+ * and initiates a balancing operation if so.
+ *
+ * Balancing parameters are set up in arch_init_sched_domains.
+ */
+
+static void
+rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
+{
+	unsigned long interval;
+	struct sched_domain *sd;
 
 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -2897,12 +2902,15 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 /*
  * on UP we do not need to balance between CPUs:
  */
-static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
+static inline void rebalance_tick(int cpu, struct rq *rq)
 {
 }
 static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
+static inline void update_load(struct rq *this_rq)
+{
+}
 #endif
 
 static inline int wake_priority_sleeper(struct rq *rq)
@@ -3052,35 +3060,12 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 	cpustat->steal = cputime64_add(cpustat->steal, tmp);
 }
 
-/*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- *
- * It also gets called by the fork code, when changing the parent's
- * timeslices.
- */
-void scheduler_tick(void)
+static void task_running_tick(struct rq *rq, struct task_struct *p)
 {
-	unsigned long long now = sched_clock();
-	struct task_struct *p = current;
-	int cpu = smp_processor_id();
-	struct rq *rq = cpu_rq(cpu);
-
-	update_cpu_clock(p, rq, now);
-
-	rq->timestamp_last_tick = now;
-
-	if (p == rq->idle) {
-		if (wake_priority_sleeper(rq))
-			goto out;
-		rebalance_tick(cpu, rq, SCHED_IDLE);
-		return;
-	}
-
-	/* Task might have expired already, but not scheduled off yet */
 	if (p->array != rq->active) {
+		/* Task has expired but was not scheduled yet */
 		set_tsk_need_resched(p);
-		goto out;
+		return;
 	}
 	spin_lock(&rq->lock);
 	/*
@@ -3148,8 +3133,35 @@ void scheduler_tick(void)
 	}
 out_unlock:
 	spin_unlock(&rq->lock);
-out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ *
+ * It also gets called by the fork code, when changing the parent's
+ * timeslices.
+ */
+void scheduler_tick(void)
+{
+	unsigned long long now = sched_clock();
+	struct task_struct *p = current;
+	int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	enum idle_type idle = NOT_IDLE;
+
+	update_cpu_clock(p, rq, now);
+
+	rq->timestamp_last_tick = now;
+
+	if (p == rq->idle) {
+		/* Task on the idle queue */
+		if (!wake_priority_sleeper(rq))
+			idle = SCHED_IDLE;
+	} else
+		task_running_tick(rq, p);
+	update_load(rq);
+	rebalance_tick(cpu, rq, idle);
 }
 
 #ifdef CONFIG_SCHED_SMT