@@ -1075,7 +1075,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
 	 * to gain a more accurate current total weight. See
 	 * update_cfs_rq_load_contribution().
 	 */
-	tg_weight = atomic64_read(&tg->load_avg);
+	tg_weight = atomic_long_read(&tg->load_avg);
 	tg_weight -= cfs_rq->tg_load_contrib;
 	tg_weight += cfs_rq->load.weight;
 
@@ -1356,13 +1356,13 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update)
 {
 	struct task_group *tg = cfs_rq->tg;
-	s64 tg_contrib;
+	long tg_contrib;
 
 	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
 	tg_contrib -= cfs_rq->tg_load_contrib;
 
-	if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
-		atomic64_add(tg_contrib, &tg->load_avg);
+	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+		atomic_long_add(tg_contrib, &tg->load_avg);
 		cfs_rq->tg_load_contrib += tg_contrib;
 	}
 }
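
The hunk above keeps a per-cfs_rq tg_load_contrib and only folds the delta into the shared tg->load_avg counter when the update is forced or the delta exceeds one eighth of the last published value. Below is a minimal userspace sketch of that pattern (not the kernel code); C11 atomics stand in for the kernel's atomic_long_* helpers, and the load values in main() are made up.

/*
 * Sketch only: mirrors the update pattern of __update_cfs_rq_tg_load_contrib()
 * from the hunk above.  The names and the "/ 8" threshold come from the hunk;
 * everything else is illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_long load_avg;		/* shared, like tg->load_avg            */
static long tg_load_contrib;		/* per-cfs_rq, last published value     */

static void update_tg_load_contrib(long local_load, bool force_update)
{
	long tg_contrib = local_load - tg_load_contrib;

	/* Only publish when the delta is large enough (or forced), so the
	 * shared counter is not written on every small change. */
	if (force_update || labs(tg_contrib) > tg_load_contrib / 8) {
		atomic_fetch_add(&load_avg, tg_contrib);	/* ~ atomic_long_add()  */
		tg_load_contrib += tg_contrib;
	}
}

int main(void)
{
	update_tg_load_contrib(1024, false);	/* delta 1024 > 0/8: published   */
	update_tg_load_contrib(1100, false);	/* delta 76 <= 1024/8: skipped   */
	update_tg_load_contrib(1300, false);	/* delta 276 > 1024/8: published */

	printf("published load_avg = %ld\n",
	       atomic_load(&load_avg));		/* ~ atomic_long_read(), prints 1300 */
	return 0;
}
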
@@ -1397,8 +1397,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 	u64 contrib;
 
 	contrib = cfs_rq->tg_load_contrib * tg->shares;
-	se->avg.load_avg_contrib = div64_u64(contrib,
-					     atomic64_read(&tg->load_avg) + 1);
+	se->avg.load_avg_contrib = div_u64(contrib,
+					   atomic_long_read(&tg->load_avg) + 1);
 
 	/*
 	 * For group entities we need to compute a correction term in the case
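
The last hunk scales this cfs_rq's contribution by the group's shares and divides by the group-wide load average plus one. A small standalone sketch with made-up numbers, assuming nothing beyond the formula visible in the hunk:

/*
 * Sketch only: the proportional-share computation from
 * __update_group_entity_contrib() above, with hypothetical values.
 * The "+ 1" in the divisor mirrors the hunk and keeps the division
 * well defined while the group's total load average is still zero.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tg_load_contrib = 2048;   /* this cfs_rq's share of the group load */
	uint64_t tg_shares       = 1024;   /* like tg->shares                       */
	int64_t  tg_load_avg     = 8192;   /* like atomic_long_read(&tg->load_avg)  */

	/* load_avg_contrib = tg_load_contrib * shares / (total group load + 1) */
	uint64_t contrib = tg_load_contrib * tg_shares;
	uint64_t load_avg_contrib = contrib / (uint64_t)(tg_load_avg + 1);

	printf("load_avg_contrib = %" PRIu64 "\n", load_avg_contrib);	/* 255 */
	return 0;
}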