@@ -191,12 +191,12 @@ struct task_group init_task_group = {
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD
 #else
-# define INIT_TASK_GRP_LOAD NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
 #endif
 
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
@@ -881,21 +881,6 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 	update_load_add(&rq->load, p->se.load.weight);
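
For context, a minimal compilable sketch of how the renamed macro and the surviving inc_load() path fit together. The types below are simplified stand-ins, not the kernel's real load_weight and rq definitions, and the NICE_0_LOAD value is an assumption (the usual nice-0 scale unit), not taken from this patch:

	/* Simplified stand-ins; not the kernel's real definitions. */
	#define NICE_0_LOAD 1024			/* assumed: nice-0 weight unit */

	#ifdef CONFIG_FAIR_USER_SCHED
	# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD	/* root group weighted 2x */
	#else
	# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
	#endif

	struct load_weight { unsigned long weight; };
	struct rq { struct load_weight load; };

	/* Fold an entity's weight into the runqueue's total load. */
	static inline void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
	}

With these stand-ins, inc_load() from the hunk above simply accumulates a task's se.load.weight into rq->load.weight; the deleted comment block described delta_exec/delta_fair bookkeeping that presumably predates this code, which is why the patch drops it as stale.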