@@ -692,8 +692,6 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-void update_cpu_load(struct rq *this_rq);
-
 static void set_load_weight(struct task_struct *p)
 {
 	int prio = p->static_prio - MAX_RT_PRIO;
@@ -2486,22 +2484,13 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * scheduler tick (TICK_NSEC). With tickless idle this will not be called
  * every tick. We fix it up based on jiffies.
  */
-void update_cpu_load(struct rq *this_rq)
+static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+			      unsigned long pending_updates)
 {
-	unsigned long this_load = this_rq->load.weight;
-	unsigned long curr_jiffies = jiffies;
-	unsigned long pending_updates;
 	int i, scale;
 
 	this_rq->nr_load_updates++;
 
-	/* Avoid repeated calls on same jiffy, when moving in and out of idle */
-	if (curr_jiffies == this_rq->last_load_update_tick)
-		return;
-
-	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-	this_rq->last_load_update_tick = curr_jiffies;
-
 	/* Update our load: */
 	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
 	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -2526,9 +2515,45 @@ void update_cpu_load(struct rq *this_rq)
 	sched_avg_update(this_rq);
 }
 
+/*
+ * Called from nohz_idle_balance() to update the load ratings before doing the
+ * idle balance.
+ */
+void update_idle_cpu_load(struct rq *this_rq)
+{
+	unsigned long curr_jiffies = jiffies;
+	unsigned long load = this_rq->load.weight;
+	unsigned long pending_updates;
+
+	/*
+	 * Bloody broken means of dealing with nohz, but better than nothing..
+	 * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
+	 * update and see 0 difference the one time and 2 the next, even though
+	 * we ticked at roughly the same rate.
+	 *
+	 * Hence we only use this from nohz_idle_balance() and skip this
+	 * nonsense when called from the scheduler_tick() since that's
+	 * guaranteed a stable rate.
+	 */
+	if (load || curr_jiffies == this_rq->last_load_update_tick)
+		return;
+
+	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+	this_rq->last_load_update_tick = curr_jiffies;
+
+	__update_cpu_load(this_rq, load, pending_updates);
+}
+
+/*
+ * Called from scheduler_tick()
+ */
 static void update_cpu_load_active(struct rq *this_rq)
 {
-	update_cpu_load(this_rq);
+	/*
+	 * See the mess in update_idle_cpu_load().
+	 */
+	this_rq->last_load_update_tick = jiffies;
+	__update_cpu_load(this_rq, this_rq->load.weight, 1);
 
 	calc_load_account_active(this_rq);
 }
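
For readers following along outside the kernel tree, below is a small stand-alone sketch (user-space C, not the kernel code) of the behaviour the refactored __update_cpu_load() implements: the current load is folded into each cpu_load[] index after first decaying the stale value once per missed jiffy. The decay helper here is a naive per-tick loop standing in for the kernel's table-driven decay_load_missed(), and the numbers in main() are made up purely for illustration.

/*
 * Stand-alone model of the cpu_load[] update; NOT the kernel code.
 */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

/* Decay `load' by (2^idx - 1)/2^idx once per missed update.  The kernel
 * avoids this loop with a precomputed degrade-factor table. */
static unsigned long decay_load_missed(unsigned long load,
				       unsigned long missed_updates, int idx)
{
	while (missed_updates--)
		load -= load >> idx;
	return load;
}

static void __update_cpu_load(unsigned long cpu_load[], unsigned long this_load,
			      unsigned long pending_updates)
{
	int i, scale;

	cpu_load[0] = this_load;	/* fast path for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* Catch up on the updates this index missed, then average. */
		old_load = decay_load_missed(cpu_load[i], pending_updates - 1, i);
		new_load = this_load;
		/* Round up when the load is increasing so the average does not
		 * get stuck just below the new value. */
		if (new_load > old_load)
			new_load += scale - 1;
		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}
}

int main(void)
{
	unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 1024, 1024, 1024, 1024, 1024 };
	int i;

	/* A busy CPU goes idle for 4 jiffies, then gets an update with no
	 * load, as update_idle_cpu_load() would do with pending_updates == 4. */
	__update_cpu_load(cpu_load, 0, 4);

	for (i = 0; i < CPU_LOAD_IDX_MAX; i++)
		printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);
	return 0;
}

With the split above, update_idle_cpu_load() can hand in a pending_updates of several jiffies after an idle stretch, while update_cpu_load_active() always passes 1, since scheduler_tick() is guaranteed a stable rate.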