@@ -1815,7 +1815,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
-static void calc_load_account_active(struct rq *this_rq);
+static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
 
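Annotation: the declaration swap pairs with a caller swap that is not among the hunks quoted here. The sched class implementations (sched_idletask.c among them) are #included into sched.c shortly after these declarations, which is why a forward declaration is needed at all; before this change, pick_next_task_idle() called calc_load_account_active() on idle entry (hence the old comment removed further down), and it now calls the idle variant. Reconstructed from that context rather than quoted from the patch, the companion hunk would read roughly:

 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
         schedstat_inc(rq, sched_goidle);
-        calc_load_account_active(rq);
+        calc_load_account_idle(rq);
         return rq->idle;
 }
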
@@ -2950,6 +2950,61 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+static long calc_load_fold_active(struct rq *this_rq)
+{
+        long nr_active, delta = 0;
+
+        nr_active = this_rq->nr_running;
+        nr_active += (long) this_rq->nr_uninterruptible;
+
+        if (nr_active != this_rq->calc_load_active) {
+                delta = nr_active - this_rq->calc_load_active;
+                this_rq->calc_load_active = nr_active;
+        }
+
+        return delta;
+}
+
+#ifdef CONFIG_NO_HZ
+/*
+ * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_tasks_idle;
+
+static void calc_load_account_idle(struct rq *this_rq)
+{
+        long delta;
+
+        delta = calc_load_fold_active(this_rq);
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks_idle);
+}
+
+static long calc_load_fold_idle(void)
+{
+        long delta = 0;
+
+        /*
+         * It's got a race, we don't care...
+         */
+        if (atomic_long_read(&calc_load_tasks_idle))
+                delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+
+        return delta;
+}
+#else
+static void calc_load_account_idle(struct rq *this_rq)
+{
+}
+
+static inline long calc_load_fold_idle(void)
+{
+        return 0;
+}
+#endif
+
 /**
  * get_avenrun - get the load average array
  * @loads:	pointer to dest load array
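Annotation: taken together, these helpers implement delta folding. Each runqueue remembers the nr_running + nr_uninterruptible count it last reported in ->calc_load_active, so a fold contributes only the change since the previous fold, and a CPU entering NO_HZ idle parks its delta in calc_load_tasks_idle for whichever CPU performs the next LOAD_FREQ update to drain. The race the comment waves off is benign: a delta added between the read and the xchg is simply picked up by a later fold. Below is a standalone userspace model of the pattern, a sketch only: struct rq here is a minimal stand-in for the kernel's, and C11 atomics replace atomic_long_t.

#include <stdatomic.h>
#include <stdio.h>

struct rq {                     /* minimal stand-in, not the kernel's struct rq */
        long nr_running;
        long nr_uninterruptible;
        long calc_load_active;  /* count as of the last fold */
};

static atomic_long calc_load_tasks_idle;

static long calc_load_fold_active(struct rq *rq)
{
        long nr_active = rq->nr_running + rq->nr_uninterruptible;
        long delta = 0;

        if (nr_active != rq->calc_load_active) {
                delta = nr_active - rq->calc_load_active;
                rq->calc_load_active = nr_active;
        }
        return delta;
}

static void calc_load_account_idle(struct rq *rq)
{
        long delta = calc_load_fold_active(rq);

        if (delta)              /* park the delta for a later drain */
                atomic_fetch_add(&calc_load_tasks_idle, delta);
}

static long calc_load_fold_idle(void)
{
        /* racy read/xchg pair: a concurrent add is caught next time */
        if (atomic_load(&calc_load_tasks_idle))
                return atomic_exchange(&calc_load_tasks_idle, 0);
        return 0;
}

int main(void)
{
        struct rq cpu = { .nr_uninterruptible = 2 };

        calc_load_account_idle(&cpu);   /* CPU idles: parks a delta of +2 */
        printf("first drain:  %ld\n", calc_load_fold_idle());  /* 2 */
        printf("second drain: %ld\n", calc_load_fold_idle());  /* 0 */
        return 0;
}
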
@@ -2996,20 +3051,22 @@ void calc_global_load(void)
 }
 
 /*
- * Either called from update_cpu_load() or from a cpu going idle
+ * Called from update_cpu_load() to periodically update this CPU's
+ * active count.
  */
 static void calc_load_account_active(struct rq *this_rq)
 {
-        long nr_active, delta;
+        long delta;
 
-        nr_active = this_rq->nr_running;
-        nr_active += (long) this_rq->nr_uninterruptible;
+        if (time_before(jiffies, this_rq->calc_load_update))
+                return;
 
-        if (nr_active != this_rq->calc_load_active) {
-                delta = nr_active - this_rq->calc_load_active;
-                this_rq->calc_load_active = nr_active;
+        delta = calc_load_fold_active(this_rq);
+        delta += calc_load_fold_idle();
+        if (delta)
                 atomic_long_add(delta, &calc_load_tasks);
-        }
+
+        this_rq->calc_load_update += LOAD_FREQ;
 }
 
 /*
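Annotation: with the window check folded into calc_load_account_active() itself, each runqueue advances its own ->calc_load_update in LOAD_FREQ steps, mirroring the global window that calc_global_load() uses to age avenrun[]. For reference, the averaging that the folded count ultimately feeds is the classic fixed-point exponential decay. The standalone model below uses the kernel's constants from include/linux/sched.h; the loop, task count, and printing are illustration added here, not kernel code.

#include <stdio.h>

#define FSHIFT  11                      /* bits of fixed-point precision */
#define FIXED_1 (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1   1884                    /* 2048 / exp(5s / 1min) */
#define EXP_5   2014                    /* 2048 / exp(5s / 5min) */
#define EXP_15  2037                    /* 2048 / exp(5s / 15min) */

/* same arithmetic as the kernel's calc_load() helper */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avenrun[3] = { 0, 0, 0 };
        unsigned long active = 2 * FIXED_1;     /* 2 runnable + D-state tasks */

        /* two minutes' worth of 5-second LOAD_FREQ windows */
        for (int i = 0; i < 24; i++) {
                avenrun[0] = calc_load(avenrun[0], EXP_1, active);
                avenrun[1] = calc_load(avenrun[1], EXP_5, active);
                avenrun[2] = calc_load(avenrun[2], EXP_15, active);
        }
        /* print like /proc/loadavg: integer part and two decimals */
        for (int i = 0; i < 3; i++)
                printf("%lu.%02lu ", avenrun[i] >> FSHIFT,
                       ((avenrun[i] & (FIXED_1 - 1)) * 100) >> FSHIFT);
        printf("\n");
        return 0;
}
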
@@ -3041,10 +3098,7 @@ static void update_cpu_load(struct rq *this_rq)
                 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
         }
 
-        if (time_after_eq(jiffies, this_rq->calc_load_update)) {
-                this_rq->calc_load_update += LOAD_FREQ;
-                calc_load_account_active(this_rq);
-        }
+        calc_load_account_active(this_rq);
 }
 
 #ifdef CONFIG_SMP
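Annotation: update_cpu_load() no longer manages the window because calc_load_account_active() now gates and advances it itself (previous hunk). Both the removed time_after_eq() and the new time_before() are the kernel's wrap-safe jiffies comparisons: they compare via signed subtraction, so they stay correct when the jiffies counter wraps, which a plain '<' does not. A standalone demonstration, with the macros modelled on include/linux/jiffies.h minus its typecheck():

#include <stdio.h>

typedef unsigned long jiffies_t;

#define time_after(a, b)        ((long)((b) - (a)) < 0)
#define time_before(a, b)       time_after(b, a)

int main(void)
{
        jiffies_t now = (jiffies_t)-2;  /* 2 ticks before the counter wraps */
        jiffies_t update = now + 5;     /* window end, past the wrap point */

        /* naive '<' fails: update wrapped around to a small value */
        printf("naive <:     %d\n", now < update);              /* 0, wrong */
        printf("time_before: %d\n", time_before(now, update));  /* 1, right */
        return 0;
}
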