@@ -4139,7 +4139,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 			 cputime_t cputime, cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	struct rq *rq = this_rq();
 	cputime64_t tmp;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
@@ -4158,37 +4157,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-	else if (p != rq->idle)
-		cpustat->system = cputime64_add(cpustat->system, tmp);
-	else if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 	else
-		cpustat->idle = cputime64_add(cpustat->idle, tmp);
+		cpustat->system = cputime64_add(cpustat->system, tmp);
+
 	/* Account for system time used */
 	acct_update_integrals(p);
 }
 
 /*
  * Account for involuntary wait time.
- * @p: the process from which the cpu time has been stolen
  * @steal: the cpu time spent in involuntary wait
  */
-void account_steal_time(struct task_struct *p, cputime_t steal)
+void account_steal_time(cputime_t cputime)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+
+	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+}
+
+/*
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
+ */
+void account_idle_time(cputime_t cputime)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp = cputime_to_cputime64(steal);
+	cputime64_t cputime64 = cputime_to_cputime64(cputime);
 	struct rq *rq = this_rq();
 
-	if (p == rq->idle) {
-		p->stime = cputime_add(p->stime, steal);
-		if (atomic_read(&rq->nr_iowait) > 0)
-			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
-		else
-			cpustat->idle = cputime64_add(cpustat->idle, tmp);
-	} else
-		cpustat->steal = cputime64_add(cpustat->steal, tmp);
+	if (atomic_read(&rq->nr_iowait) > 0)
+		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+	else
+		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+}
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+
+/*
+ * Account a single tick of cpu time.
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: indicates if the tick is a user or a system tick
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	cputime_t one_jiffy = jiffies_to_cputime(1);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	struct rq *rq = this_rq();
+
+	if (user_tick)
+		account_user_time(p, one_jiffy, one_jiffy_scaled);
+	else if (p != rq->idle)
+		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+				    one_jiffy_scaled);
+	else
+		account_idle_time(one_jiffy);
+}
+
+/*
+ * Account multiple ticks of steal time.
+ * @p: the process from which the cpu time has been stolen
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+	account_steal_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Account multiple ticks of idle time.
+ * @ticks: number of idle ticks
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+	account_idle_time(jiffies_to_cputime(ticks));
 }
 
+#endif
+
 /*
  * Use precise platform statistics if available:
  */