@@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -5786,6 +5786,8 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
+
+	sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -8508,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
 	WARN_ON(!parent);	/* root should already exist */
 
 	tg->parent = parent;
-	list_add_rcu(&tg->siblings, &parent->children);
 	INIT_LIST_HEAD(&tg->children);
+	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
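
The last hunk reorders sched_create_group() so that INIT_LIST_HEAD(&tg->children) runs before list_add_rcu() puts the new group on its parent's children list, presumably so a concurrent lockless walker of the group tree can never descend into a not-yet-initialized children list. Below is a minimal user-space sketch of that init-before-publish rule; it is not part of the patch, and the struct group / create_group() / walk() names and the C11-atomics single-pointer child list are invented stand-ins for the kernel's list_head, list_add_rcu() and RCU list iteration.

/*
 * Illustrative sketch only -- not from the patch.  A simplified child list
 * built on C11 atomics stands in for list_head/list_add_rcu(); the point is
 * the ordering: fully initialize the new node (including its own, initially
 * empty child list) before the release-store that makes it reachable.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct group {
	struct group *parent;
	struct group *_Atomic first_child;	/* this group's children        */
	struct group *_Atomic next_sibling;	/* link on the parent's list    */
};

/* Lockless reader, analogous to an RCU walk of the task-group tree. */
static void walk(struct group *g, int depth)
{
	printf("%*sgroup %p\n", depth * 2, "", (void *)g);
	for (struct group *c = atomic_load(&g->first_child); c;
	     c = atomic_load(&c->next_sibling))
		walk(c, depth + 1);	/* immediately descends into c */
}

/* Writer, mirroring the corrected ordering in sched_create_group(). */
static void create_group(struct group *child, struct group *parent)
{
	child->parent = parent;
	/* "INIT_LIST_HEAD" equivalent comes first ... */
	atomic_store(&child->first_child, NULL);
	atomic_store(&child->next_sibling, atomic_load(&parent->first_child));
	/* ... and the "list_add_rcu" publish comes last. */
	atomic_store_explicit(&parent->first_child, child, memory_order_release);
}

int main(void)
{
	struct group root = { 0 }, a = { 0 }, b = { 0 };

	create_group(&a, &root);
	create_group(&b, &a);
	walk(&root, 0);
	return 0;
}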