@@ -892,24 +892,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
 
-static inline void update_load_add(struct load_weight *lw, unsigned long inc)
-{
-	lw->weight += inc;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
-{
-	lw->weight -= dec;
-	lw->inv_weight = 0;
-}
-
-static inline void update_load_set(struct load_weight *lw, unsigned long w)
-{
-	lw->weight = w;
-	lw->inv_weight = 0;
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that