@@ -215,6 +215,8 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 
 const struct sched_class fair_sched_class;
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -3776,6 +3778,11 @@ void update_group_power(struct sched_domain *sd, int cpu)
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
 	unsigned long power;
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(sd->balance_interval);
+	interval = clamp(interval, 1UL, max_load_balance_interval);
+	sdg->sgp->next_update = jiffies + interval;
 
 	if (!child) {
 		update_cpu_power(sd, cpu);
@@ -3883,12 +3890,15 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * domains. In the newly idle case, we will allow all the cpu's
 	 * to do the newly idle load balance.
 	 */
-	if (idle != CPU_NEWLY_IDLE && local_group) {
-		if (balance_cpu != this_cpu) {
-			*balance = 0;
-			return;
-		}
-		update_group_power(sd, this_cpu);
+	if (local_group) {
+		if (idle != CPU_NEWLY_IDLE) {
+			if (balance_cpu != this_cpu) {
+				*balance = 0;
+				return;
+			}
+			update_group_power(sd, this_cpu);
+		} else if (time_after_eq(jiffies, group->sgp->next_update))
+			update_group_power(sd, this_cpu);
 	}
 
 	/* Adjust by relative CPU power of the group */
@@ -4945,8 +4955,6 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
 
 static DEFINE_SPINLOCK(balancing);
 
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
-
 /*
  * Scale the max load_balance interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
|