@@ -2253,6 +2253,20 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	struct rq *rq;
 	u64 ns = 0;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+	/*
+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
+	 * So we have an optimization opportunity when the task's delta_exec is 0.
+	 * Reading ->on_cpu is racy, but that is OK.
+	 *
+	 * If we race with it leaving the CPU, we'll take the lock, so we're correct.
+	 * If we race with it entering the CPU, unaccounted time is 0. This is
+	 * indistinguishable from the read occurring a few cycles earlier.
+	 */
+	if (!p->on_cpu)
+		return p->se.sum_exec_runtime;
+#endif
+
 	rq = task_rq_lock(p, &flags);
 	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
 	task_rq_unlock(rq, p, &flags);
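
The fast path is safe for exactly the reason the comment gives: under CONFIG_64BIT an aligned 64-bit load cannot tear, and both outcomes of the racy ->on_cpu check are benign. As an illustration, here is a minimal user-space sketch of the same pattern, assuming C11 atomics and pthreads. struct task_stats, read_runtime() and all field names are hypothetical stand-ins for the scheduler's task/rq state, not kernel API; sum_exec_runtime is modeled as _Atomic so the lockless read is well defined in C11, whereas the kernel relies on the architecture guarantee instead.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct task_stats {
	pthread_mutex_t lock;			/* stands in for the rq lock */
	atomic_bool on_cpu;			/* racy hint: currently running? */
	_Atomic uint64_t sum_exec_runtime;	/* total accounted runtime (ns) */
	uint64_t delta_exec;			/* unaccounted time, lock-protected */
};

static uint64_t read_runtime(struct task_stats *t)
{
	/*
	 * Fast path: a task that is not on a CPU has no pending
	 * delta_exec, so a single load is the complete answer.
	 * Racing with the task leaving the CPU just drops us into the
	 * locked slow path; racing with it entering the CPU returns a
	 * value indistinguishable from a slightly earlier read.
	 */
	if (!atomic_load(&t->on_cpu))
		return atomic_load(&t->sum_exec_runtime);

	/* Slow path: take the lock and fold in the unaccounted delta. */
	pthread_mutex_lock(&t->lock);
	uint64_t ns = atomic_load(&t->sum_exec_runtime) + t->delta_exec;
	pthread_mutex_unlock(&t->lock);
	return ns;
}

int main(void)
{
	struct task_stats t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.on_cpu = false,
		.sum_exec_runtime = 123456789ULL,
		.delta_exec = 0,
	};

	printf("runtime: %llu ns\n",
	       (unsigned long long)read_runtime(&t));
	return 0;
}

Note the preprocessor guard in the patch: on 32-bit kernels a 64-bit load can tear, and ->on_cpu only exists on SMP builds, so the lockless return is compiled out there and every reader takes the rq lock.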