@@ -1908,6 +1908,55 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dec_nr_running(rq);
 }
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+	sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+	sched_clock_irqtime = 0;
+}
+
+void account_system_vtime(struct task_struct *curr)
+{
+	unsigned long flags;
+	int cpu;
+	u64 now, delta;
+
+	if (!sched_clock_irqtime)
+		return;
+
+	local_irq_save(flags);
+
+	now = sched_clock();
+	cpu = smp_processor_id();
+	delta = now - per_cpu(irq_start_time, cpu);
+	per_cpu(irq_start_time, cpu) = now;
+	/*
+	 * We do not account softirq time from ksoftirqd here: that time
+	 * should stay accounted to the ksoftirqd thread itself, so as
+	 * not to confuse the scheduler with a special task that does not
+	 * consume any time but still wants to run.
+	 */
+	if (hardirq_count())
+		per_cpu(cpu_hardirq_time, cpu) += delta;
+	else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+		per_cpu(cpu_softirq_time, cpu) += delta;
+
+	local_irq_restore(flags);
+}
+
+#endif
+
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
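
For context: this hunk only accumulates the time. It depends on account_system_vtime() being invoked on hardirq/softirq entry and exit (the __irq_enter()/irq_exit() and softirq paths call it), and on an architecture calling enable_sched_clock_irqtime() once it knows sched_clock() is cheap and reliable enough for these hot paths. Below is a minimal sketch of how a consumer could fold the two per-CPU counters into a single irq-time value; irq_time_read() is a hypothetical helper name, not something this hunk adds:

static u64 irq_time_read(int cpu)
{
	/*
	 * Sketch only: both counters are updated with interrupts
	 * disabled on @cpu, so a same-CPU read sees a consistent sum.
	 * A cross-CPU reader on 32-bit would need protection against
	 * torn u64 reads.
	 */
	return per_cpu(cpu_softirq_time, cpu) +
	       per_cpu(cpu_hardirq_time, cpu);
}

An architecture would then opt in from its clock setup, along these lines (placeholder function and predicate, modeled on x86 enabling this from TSC init):

static void __init my_arch_sched_clock_init(void)
{
	if (sched_clock_is_fast)	/* hypothetical flag */
		enable_sched_clock_irqtime();
}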