@@ -89,6 +89,13 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+/*
+ * The exponential sliding window over which load is averaged for shares
+ * distribution.
+ * (default: 10msec)
+ */
+unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -688,7 +695,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
-	u64 period = sched_avg_period();
+	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
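
For illustration only: the decay behaviour implied by this window can be sketched outside the kernel as a loop that halves the accumulated contribution once per completed window before adding new load. The names below (window_avg, window_update, WINDOW_NS) are invented for the example and this is not the actual update_cfs_load() body; it only shows, under that assumption, how a 10msec exponential sliding window differs from a flat moving average.

#include <stdio.h>

/* Window length in nanoseconds; mirrors the 10msec default of
 * sysctl_sched_shares_window above. */
#define WINDOW_NS 10000000ULL

/* Hypothetical accumulator, not a kernel structure. */
struct window_avg {
	unsigned long long period_start;	/* start of the current window */
	unsigned long long load_sum;		/* load gathered this window */
	unsigned long long avg;			/* decayed running average */
};

/* Fold every completed window into the average: each full window that
 * has elapsed halves the prior contribution, which is what makes the
 * window "exponential" rather than a flat moving average. */
static void window_update(struct window_avg *w, unsigned long long now,
			  unsigned long long load)
{
	while (now - w->period_start >= WINDOW_NS) {
		w->avg = (w->avg + w->load_sum) / 2;
		w->load_sum = 0;
		w->period_start += WINDOW_NS;
	}
	w->load_sum += load;
}

int main(void)
{
	struct window_avg w = { 0, 0, 0 };
	unsigned long long t;

	/* Feed a constant load of 1024 every 2msec for 50msec. */
	for (t = 0; t <= 50000000ULL; t += 2000000ULL)
		window_update(&w, t, 1024);

	printf("decayed average after 50msec: %llu\n", w.avg);
	return 0;
}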