@@ -457,6 +457,7 @@ struct rq {
         unsigned long nr_running;
         #define CPU_LOAD_IDX_MAX 5
         unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+        unsigned long last_load_update_tick;
 #ifdef CONFIG_NO_HZ
         u64 nohz_stamp;
         unsigned char in_nohz_recently;
@@ -1803,6 +1804,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
+static void update_cpu_load(struct rq *this_rq);
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
@@ -3049,24 +3051,103 @@ static void calc_load_account_active(struct rq *this_rq)
                 this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+ * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
+ * on nth tick when cpu may be busy, then we have:
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
+ *
+ * decay_load_missed() below does efficient calculation of
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
+ *
+ * The calculation is approximated on a 128 point scale.
+ * degrade_zero_ticks is the number of ticks after which load at any
+ * particular idx is approximated to be zero.
+ * degrade_factor is a precomputed table, a row for each load idx.
+ * Each column corresponds to degradation factor for a power of two ticks,
+ * based on 128 point scale.
+ * Example:
+ * row 2, col 3 (=12) says that the degradation at load idx 2 after
+ * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
+ *
+ * With this power of 2 load factors, we can degrade the load n times
+ * by looking at 1 bits in n and doing as many mult/shift instead of
+ * n mult/shifts needed by the exact degradation.
+ */
+#define DEGRADE_SHIFT           7
+static const unsigned char
+                degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
+static const unsigned char
+                degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
+                                        {0, 0, 0, 0, 0, 0, 0, 0},
+                                        {64, 32, 8, 0, 0, 0, 0, 0},
+                                        {96, 72, 40, 12, 1, 0, 0},
+                                        {112, 98, 75, 43, 15, 1, 0},
+                                        {120, 112, 98, 76, 45, 16, 2} };
+
+/*
+ * Update cpu_load for any missed ticks, due to tickless idle. The backlog
+ * would be when CPU is idle and so we just decay the old load without
+ * adding any new load.
+ */
+static unsigned long
+decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+{
+        int j = 0;
+
+        if (!missed_updates)
+                return load;
+
+        if (missed_updates >= degrade_zero_ticks[idx])
+                return 0;
+
+        if (idx == 1)
+                return load >> missed_updates;
+
+        while (missed_updates) {
+                if (missed_updates % 2)
+                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
+
+                missed_updates >>= 1;
+                j++;
+        }
+        return load;
+}
+
 /*
  * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC).
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
  */
 static void update_cpu_load(struct rq *this_rq)
 {
         unsigned long this_load = this_rq->load.weight;
+        unsigned long curr_jiffies = jiffies;
+        unsigned long pending_updates;
         int i, scale;
 
         this_rq->nr_load_updates++;
 
+        /* Avoid repeated calls on same jiffy, when moving in and out of idle */
+        if (curr_jiffies == this_rq->last_load_update_tick)
+                return;
+
+        pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+        this_rq->last_load_update_tick = curr_jiffies;
+
         /* Update our load: */
-        for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                 unsigned long old_load, new_load;
 
                 /* scale is effectively 1 << i now, and >> i divides by scale */
 
                 old_load = this_rq->cpu_load[i];
+                old_load = decay_load_missed(old_load, pending_updates - 1, i);
                 new_load = this_load;
                 /*
                  * Round up the averaging division if load is increasing. This
@@ -3074,9 +3155,15 @@ static void update_cpu_load(struct rq *this_rq)
                  * example.
                  */
                 if (new_load > old_load)
-                        new_load += scale-1;
-                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+                        new_load += scale - 1;
+
+                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
         }
+}
+
+static void update_cpu_load_active(struct rq *this_rq)
+{
+        update_cpu_load(this_rq);
 
         calc_load_account_active(this_rq);
 }
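
Each degrade_factor[idx][col] above is just ((2^idx - 1) / 2^idx)^(2^col) scaled to 128, which is why decay_load_missed() can walk the set bits of missed_updates and do one mult/shift per bit instead of one per missed tick. For instance, decay_load_missed(512, 8, 2) hits only bit 3 and returns (512 * 12) >> 7 = 48, against an exact 512 * (3/4)^8, roughly 51. The following small userspace sketch (the harness, main() and the chosen numbers are mine, not part of the patch) copies the tables and compares the table-driven result with the exact factor:

#include <stdio.h>
#include <math.h>

#define CPU_LOAD_IDX_MAX        5
#define DEGRADE_SHIFT           7

static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] =
                {0, 8, 32, 64, 128};
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
                {0, 0, 0, 0, 0, 0, 0, 0},
                {64, 32, 8, 0, 0, 0, 0, 0},
                {96, 72, 40, 12, 1, 0, 0},
                {112, 98, 75, 43, 15, 1, 0},
                {120, 112, 98, 76, 45, 16, 2} };

/* Same logic as the patch's decay_load_missed(), copied here for the test. */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
        int j = 0;

        if (!missed_updates)
                return load;
        if (missed_updates >= degrade_zero_ticks[idx])
                return 0;
        if (idx == 1)
                return load >> missed_updates;

        while (missed_updates) {
                if (missed_updates % 2)
                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
                missed_updates >>= 1;
                j++;
        }
        return load;
}

int main(void)
{
        unsigned long load = 1024;              /* arbitrary starting load */
        int idx, n;

        for (idx = 1; idx < CPU_LOAD_IDX_MAX; idx++) {
                double scale = 1 << idx;

                for (n = 1; n <= 16; n++) {
                        /* exact decay: load * ((2^idx - 1) / 2^idx)^n */
                        double exact = load * pow((scale - 1) / scale, n);
                        unsigned long table = decay_load_missed(load, n, idx);

                        printf("idx=%d n=%2d exact=%8.2f table=%lu\n",
                               idx, n, exact, table);
                }
        }
        return 0;
}

Built with something like gcc -o decay decay.c -lm, it shows the table-driven value tracking the exact factor closely and dropping to zero once missed_updates reaches degrade_zero_ticks[idx].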
@@ -3464,7 +3551,7 @@ void scheduler_tick(void)
 
         raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
-        update_cpu_load(rq);
+        update_cpu_load_active(rq);
         curr->sched_class->task_tick(rq, curr, 0);
         raw_spin_unlock(&rq->lock);
 
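
On a busy CPU the tick path is essentially unchanged: update_cpu_load_active() still does the per-tick load update plus calc_load_account_active(). The interesting case is the first update_cpu_load() call after a tickless gap, which decays the stale cpu_load[] entries for the missed ticks and then applies one ordinary averaging step. A toy walkthrough of those two steps for cpu_load[1] only, where the decay is an exact shift rather than a table lookup (the numbers and the fold() helper are made up for illustration, not taken from the patch):

#include <stdio.h>

/* One averaging step of update_cpu_load() for a single index. */
static unsigned long fold(unsigned long old_load, unsigned long new_load, int idx)
{
        unsigned long scale = 1UL << idx;

        /* same round-up rule as the patch when load is increasing */
        if (new_load > old_load)
                new_load += scale - 1;
        return (old_load * (scale - 1) + new_load) >> idx;
}

int main(void)
{
        unsigned long cpu_load1 = 800;          /* cpu_load[1] when the CPU went idle */
        unsigned long pending_updates = 5;      /* jiffies since last_load_update_tick */
        unsigned long this_load = 600;          /* runqueue weight after wakeup */
        unsigned long old_load;

        /* decay the stale value for the pending_updates - 1 missed ticks ... */
        old_load = cpu_load1 >> (pending_updates - 1);
        /* ... then fold in the current load once, like a normal tick */
        cpu_load1 = fold(old_load, this_load, 1);

        printf("decayed old load: %lu, new cpu_load[1]: %lu\n", old_load, cpu_load1);
        return 0;
}

For the higher indexes the same two steps apply, with decay_load_missed() standing in for the shift.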
@@ -7688,6 +7775,9 @@ void __init sched_init(void)
 
                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                         rq->cpu_load[j] = 0;
+
+                rq->last_load_update_tick = jiffies;
+
 #ifdef CONFIG_SMP
                 rq->sd = NULL;
                 rq->rd = NULL;