|
@@ -388,82 +388,10 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
|
|
|
struct rq *rq) {}
|
|
|
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
|
|
|
|
|
|
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
|
|
-/*
|
|
|
- * Account a single tick of cpu time.
|
|
|
- * @p: the process that the cpu time gets accounted to
|
|
|
- * @user_tick: indicates if the tick is a user or a system tick
|
|
|
- */
|
|
|
-void account_process_tick(struct task_struct *p, int user_tick)
|
|
|
-{
|
|
|
- cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
|
|
|
- struct rq *rq = this_rq();
|
|
|
-
|
|
|
- if (vtime_accounting_enabled())
|
|
|
- return;
|
|
|
-
|
|
|
- if (sched_clock_irqtime) {
|
|
|
- irqtime_account_process_tick(p, user_tick, rq);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- if (steal_account_process_tick())
|
|
|
- return;
|
|
|
-
|
|
|
- if (user_tick)
|
|
|
- account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
|
|
|
- else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
|
|
|
- account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
|
|
|
- one_jiffy_scaled);
|
|
|
- else
|
|
|
- account_idle_time(cputime_one_jiffy);
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Account multiple ticks of steal time.
|
|
|
- * @p: the process from which the cpu time has been stolen
|
|
|
- * @ticks: number of stolen ticks
|
|
|
- */
|
|
|
-void account_steal_ticks(unsigned long ticks)
|
|
|
-{
|
|
|
- account_steal_time(jiffies_to_cputime(ticks));
|
|
|
-}
|
|
|
-
|
|
|
-/*
|
|
|
- * Account multiple ticks of idle time.
|
|
|
- * @ticks: number of stolen ticks
|
|
|
- */
|
|
|
-void account_idle_ticks(unsigned long ticks)
|
|
|
-{
|
|
|
-
|
|
|
- if (sched_clock_irqtime) {
|
|
|
- irqtime_account_idle_ticks(ticks);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- account_idle_time(jiffies_to_cputime(ticks));
|
|
|
-}
|
|
|
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
|
|
-
|
|
|
/*
|
|
|
* Use precise platform statistics if available:
|
|
|
*/
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
|
|
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
|
|
|
-{
|
|
|
- *ut = p->utime;
|
|
|
- *st = p->stime;
|
|
|
-}
|
|
|
-
|
|
|
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
|
|
|
-{
|
|
|
- struct task_cputime cputime;
|
|
|
-
|
|
|
- thread_group_cputime(p, &cputime);
|
|
|
-
|
|
|
- *ut = cputime.utime;
|
|
|
- *st = cputime.stime;
|
|
|
-}
|
|
|
|
|
|
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
|
|
|
void vtime_task_switch(struct task_struct *prev)
|
|
@@ -518,8 +446,80 @@ void vtime_account_irq_enter(struct task_struct *tsk)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
|
|
|
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
|
|
|
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
+
|
|
|
+
|
|
|
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
|
|
|
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
|
|
|
+{
|
|
|
+ *ut = p->utime;
|
|
|
+ *st = p->stime;
|
|
|
+}
|
|
|
|
|
|
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
|
|
|
+{
|
|
|
+ struct task_cputime cputime;
|
|
|
+
|
|
|
+ thread_group_cputime(p, &cputime);
|
|
|
+
|
|
|
+ *ut = cputime.utime;
|
|
|
+ *st = cputime.stime;
|
|
|
+}
|
|
|
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
|
|
+/*
|
|
|
+ * Account a single tick of cpu time.
|
|
|
+ * @p: the process that the cpu time gets accounted to
|
|
|
+ * @user_tick: indicates if the tick is a user or a system tick
|
|
|
+ */
|
|
|
+void account_process_tick(struct task_struct *p, int user_tick)
|
|
|
+{
|
|
|
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
|
|
|
+ struct rq *rq = this_rq();
|
|
|
+
|
|
|
+ if (vtime_accounting_enabled())
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (sched_clock_irqtime) {
|
|
|
+ irqtime_account_process_tick(p, user_tick, rq);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (steal_account_process_tick())
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (user_tick)
|
|
|
+ account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
|
|
|
+ else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
|
|
|
+ account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
|
|
|
+ one_jiffy_scaled);
|
|
|
+ else
|
|
|
+ account_idle_time(cputime_one_jiffy);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Account multiple ticks of steal time.
|
|
|
+ * @p: the process from which the cpu time has been stolen
|
|
|
+ * @ticks: number of stolen ticks
|
|
|
+ */
|
|
|
+void account_steal_ticks(unsigned long ticks)
|
|
|
+{
|
|
|
+ account_steal_time(jiffies_to_cputime(ticks));
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Account multiple ticks of idle time.
|
|
|
+ * @ticks: number of idle ticks
|
|
|
+ */
|
|
|
+void account_idle_ticks(unsigned long ticks)
|
|
|
+{
|
|
|
+
|
|
|
+ if (sched_clock_irqtime) {
|
|
|
+ irqtime_account_idle_ticks(ticks);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ account_idle_time(jiffies_to_cputime(ticks));
|
|
|
+}
|
|
|
|
|
|
static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
|
|
|
{
|
|
@@ -545,6 +545,12 @@ static void cputime_adjust(struct task_cputime *curr,
|
|
|
{
|
|
|
cputime_t rtime, stime, total;
|
|
|
|
|
|
+ if (vtime_accounting_enabled()) {
|
|
|
+ *ut = curr->utime;
|
|
|
+ *st = curr->stime;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
stime = curr->stime;
|
|
|
total = stime + curr->utime;
|
|
|
|
|
@@ -597,7 +603,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
|
|
|
thread_group_cputime(p, &cputime);
|
|
|
cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
|
|
|
}
|
|
|
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
|
|
|
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
|
|
|
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
|
static unsigned long long vtime_delta(struct task_struct *tsk)
|