/* kernel/sched/cputime.c */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would then read either
 * the old or the new value, with the side effect of accounting a slice
 * of irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
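
/*
 * irqtime_account_hi_update()/irqtime_account_si_update() compare the
 * nanosecond-granular per-cpu irq time against what has already been
 * folded into kcpustat. A non-zero return tells irqtime_account_process_tick()
 * that the accumulated hardirq/softirq time has grown beyond what kcpustat
 * already reflects, so the current tick is charged there instead of to the
 * running task.
 */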
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
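
/*
 * With irq time accounting compiled out, sched_clock_irqtime is a
 * compile-time constant zero, so the sched_clock_irqtime checks below
 * (and the irqtime tick helpers they guard) are optimized away.
 */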

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
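/*
 * Note that guest time is also accounted as user time: p->utime and the
 * USER/NICE cpustat fields are updated together with p->gtime and the
 * GUEST/GUEST_NICE fields, so guest time shows up as a subset of user time.
 */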
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
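
/*
 * When running as a paravirtualized guest, ask the hypervisor how much
 * time was stolen from this vCPU since the last update, account it as
 * steal time, and return non-zero if at least one full tick was stolen
 * so the caller can skip the rest of this tick's accounting.
 */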
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there
 * is no timer interrupt going off while we are in a hardirq, and hence
 * we may never get an opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time, not on irq or
 * softirq time, as those no longer count in the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * give idle time a different meaning (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook
		 * on irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow
 * by losing precision when the numbers are big.
 */
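/*
 * For example, scale_stime(stime = 2, rtime = 9, total = 6) yields
 * 2 * 9 / 6 = 3: the task is charged 3 units of system time out of the
 * 9 units of scheduler runtime, preserving the 2:4 stime:utime ratio
 * seen by the tick counts. When the values do not all fit in 32 bits,
 * the loop below either doubles stime while halving rtime (keeping
 * stime * rtime roughly constant) or halves rtime and total together
 * (keeping rtime / total roughly constant) until a 32x32->64 multiply
 * and a 64/32 divide are safe.
 */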
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;

	/*
	 * Tick based cputime accounting depends on whether the random
	 * scheduling timeslices of a task happen to be interrupted by the
	 * timer or not. Depending on these circumstances, the number of
	 * these interrupts may be over- or under-estimated, matching the
	 * real user and system cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * Update userspace visible utime/stime values only if actual execution
	 * time is bigger than already exported. Note that it can happen that we
	 * provided bigger values due to scaling inaccuracy with big numbers.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		cputime_t total = stime + utime;

		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
		utime = rtime - stime;
	}

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, utime);

out:
	*ut = prev->utime;
	*st = prev->stime;
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
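
/*
 * tsk->vtime_snap records when this task's vtime was last flushed.
 * vtime_delta() returns the nanoseconds elapsed since that snapshot,
 * and get_vtime_delta() additionally advances the snapshot so the same
 * interval is never accounted twice.
 */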
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqlock(&tsk->vtime_seqlock);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update-sequence
	 * synchronization against the reader (task_gtime()),
	 * which can thus safely catch up with a tickless delta.
	 */
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}
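
/*
 * On a context switch the outgoing task stops accumulating vtime
 * (VTIME_SLEEPING) and the incoming task restarts its snapshot from the
 * current scheduler clock, so time spent switched out is never charged
 * through the vtime path.
 */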
void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
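
/*
 * task_gtime() samples guest time under the vtime seqlock so a concurrent
 * writer flushing pending vtime cannot be observed halfway; if the task is
 * currently running guest code (PF_VCPU), the not-yet-flushed delta is
 * added on top.
 */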
cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */