@@ -519,6 +519,10 @@ struct rq {
 	u64 avg_idle;
 #endif
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	u64 prev_irq_time;
+#endif
+
 	/* calc_load related fields */
 	unsigned long calc_load_update;
 	long calc_load_active;
@@ -643,6 +647,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 #endif /* CONFIG_CGROUP_SCHED */
 
 static u64 irq_time_cpu(int cpu);
+static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
 
 inline void update_rq_clock(struct rq *rq)
 {
@@ -654,6 +659,8 @@ inline void update_rq_clock(struct rq *rq)
 		irq_time = irq_time_cpu(cpu);
 		if (rq->clock - irq_time > rq->clock_task)
 			rq->clock_task = rq->clock - irq_time;
+
+		sched_irq_time_avg_update(rq, irq_time);
 	}
 }
 
@@ -1985,6 +1992,15 @@ void account_system_vtime(struct task_struct *curr)
 	local_irq_restore(flags);
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+{
+	if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+		u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+		rq->prev_irq_time = curr_irq_time;
+		sched_rt_avg_update(rq, delta_irq);
+	}
+}
+
 #else
 
 static u64 irq_time_cpu(int cpu)
@@ -1992,6 +2008,8 @@ static u64 irq_time_cpu(int cpu)
 	return 0;
 }
 
+static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
 #endif
 
 #include "sched_idletask.c"
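
For readers following the patch, the new helper uses a simple delta-tracking pattern: rq->prev_irq_time snapshots the cumulative per-CPU IRQ time at each runqueue clock update, and only the IRQ time accrued since the previous update is folded into the scheduler's running average (via sched_rt_avg_update() in the real code). Below is a minimal, self-contained sketch of that bookkeeping; it is not kernel code, and the toy_* names and sample values are purely illustrative.

/*
 * Illustrative sketch only (not kernel code): the delta-tracking pattern
 * used by sched_irq_time_avg_update() above. prev_irq_time remembers the
 * cumulative IRQ time seen at the last clock update, so only the IRQ time
 * accrued since then is added to the average (the kernel feeds it to
 * sched_rt_avg_update(), which also decays the sum over time).
 */
#include <stdio.h>
#include <stdint.h>

struct toy_rq {
	uint64_t prev_irq_time;	/* cumulative IRQ time at last update */
	uint64_t rt_avg;	/* stand-in for the decayed rt_avg sum */
};

static void toy_irq_time_avg_update(struct toy_rq *rq, uint64_t curr_irq_time)
{
	uint64_t delta_irq = curr_irq_time - rq->prev_irq_time;

	rq->prev_irq_time = curr_irq_time;
	rq->rt_avg += delta_irq;	/* real code: sched_rt_avg_update(rq, delta_irq) */
}

int main(void)
{
	struct toy_rq rq = { 0, 0 };
	/* cumulative per-CPU IRQ time as it might look on successive updates */
	uint64_t samples[] = { 1000, 1500, 1500, 4200 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		toy_irq_time_avg_update(&rq, samples[i]);
		printf("irq_time=%llu rt_avg=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)rq.rt_avg);
	}
	return 0;
}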