cpuacct.c

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/kernel_stat.h>
#include <linux/err.h>

#include "sched.h"

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
        CPUACCT_STAT_USER,      /* ... user mode */
        CPUACCT_STAT_SYSTEM,    /* ... kernel mode */

        CPUACCT_STAT_NSTATS,
};

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
        struct cgroup_subsys_state css;
        /* cpuusage holds pointer to a u64-type object on every cpu */
        u64 __percpu *cpuusage;
        struct kernel_cpustat __percpu *cpustat;
};

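/*
 * cpuusage accumulates raw execution time, in nanoseconds, per cpu
 * (charged from cpuacct_charge()), while cpustat collects the per-cpu
 * user/system cputime buckets reported through cpuacct.stat.
 */
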
  30. /* return cpu accounting group corresponding to this container */
  31. static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
  32. {
  33. return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
  34. struct cpuacct, css);
  35. }
  36. /* return cpu accounting group to which this task belongs */
  37. static inline struct cpuacct *task_ca(struct task_struct *tsk)
  38. {
  39. return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
  40. struct cpuacct, css);
  41. }
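/*
 * Return the parent cpu accounting group.  __parent_ca() assumes the group
 * is not the root (its caller stops at root_cpuacct first), while
 * parent_ca() returns NULL for the root group instead.
 */
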
static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
{
        return cgroup_ca(ca->css.cgroup->parent);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
        if (!ca->css.cgroup->parent)
                return NULL;
        return cgroup_ca(ca->css.cgroup->parent);
}

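/*
 * The root group is allocated statically (and its cpustat aliases the
 * global kernel_cpustat) because the subsystem is registered with
 * .early_init = 1, so its root css is created before the normal
 * allocators are available.
 */
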
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
static struct cpuacct root_cpuacct = {
        .cpustat = &kernel_cpustat,
        .cpuusage = &root_cpuacct_cpuusage,
};

/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
{
        struct cpuacct *ca;

        if (!cgrp->parent)
                return &root_cpuacct.css;

        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
                goto out;

        ca->cpuusage = alloc_percpu(u64);
        if (!ca->cpuusage)
                goto out_free_ca;

        ca->cpustat = alloc_percpu(struct kernel_cpustat);
        if (!ca->cpustat)
                goto out_free_cpuusage;

        return &ca->css;

out_free_cpuusage:
        free_percpu(ca->cpuusage);
out_free_ca:
        kfree(ca);
out:
        return ERR_PTR(-ENOMEM);
}

/* destroy an existing cpu accounting group */
static void cpuacct_css_free(struct cgroup *cgrp)
{
        struct cpuacct *ca = cgroup_ca(cgrp);

        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
        kfree(ca);
}

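/*
 * cpuacct_charge() updates ca->cpuusage with rq->lock held, so on 32-bit
 * platforms the accessors below take the same lock to avoid torn 64-bit
 * reads and writes.
 */
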
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        data = *cpuusage;
#endif

        return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
        *cpuusage = val;
#endif
}

/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        u64 totalcpuusage = 0;
        int i;

        for_each_present_cpu(i)
                totalcpuusage += cpuacct_cpuusage_read(ca, i);

        return totalcpuusage;
}

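/*
 * Writing any value other than '0' is rejected; writing '0' resets the
 * usage counter on every present cpu.
 */
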
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
                          u64 reset)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int err = 0;
        int i;

        if (reset) {
                err = -EINVAL;
                goto out;
        }

        for_each_present_cpu(i)
                cpuacct_cpuusage_write(ca, i, 0);

out:
        return err;
}

static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
                                   struct seq_file *m)
{
        struct cpuacct *ca = cgroup_ca(cgroup);
        u64 percpu;
        int i;

        for_each_present_cpu(i) {
                percpu = cpuacct_cpuusage_read(ca, i);
                seq_printf(m, "%llu ", (unsigned long long) percpu);
        }
        seq_printf(m, "\n");
        return 0;
}

static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_USER] = "user",
        [CPUACCT_STAT_SYSTEM] = "system",
};

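/*
 * cpuacct.stat reports "user" (user + nice) and "system" (system + irq +
 * softirq) time, converted from cputime to clock_t (USER_HZ) units.
 */
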
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
                              struct cgroup_map_cb *cb)
{
        struct cpuacct *ca = cgroup_ca(cgrp);
        int cpu;
        s64 val = 0;

        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_USER];
                val += kcpustat->cpustat[CPUTIME_NICE];
        }
        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);

        val = 0;
        for_each_online_cpu(cpu) {
                struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
                val += kcpustat->cpustat[CPUTIME_SYSTEM];
                val += kcpustat->cpustat[CPUTIME_IRQ];
                val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
        }
        val = cputime64_to_clock_t(val);
        cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);

        return 0;
}

static struct cftype files[] = {
        {
                .name = "usage",
                .read_u64 = cpuusage_read,
                .write_u64 = cpuusage_write,
        },
        {
                .name = "usage_percpu",
                .read_seq_string = cpuacct_percpu_seq_read,
        },
        {
                .name = "stat",
                .read_map = cpuacct_stats_show,
        },
        { }     /* terminate */
};

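/*
 * With the "cpuacct" prefix added by the cgroup core, these cftypes appear
 * in every cpuacct cgroup directory as cpuacct.usage, cpuacct.usage_percpu
 * and cpuacct.stat.  A minimal usage sketch (paths are illustrative,
 * assuming the v1 controller is mounted at /sys/fs/cgroup/cpuacct):
 *
 *      mkdir /sys/fs/cgroup/cpuacct/grp
 *      echo $$ > /sys/fs/cgroup/cpuacct/grp/tasks
 *      cat /sys/fs/cgroup/cpuacct/grp/cpuacct.usage          # total ns
 *      cat /sys/fs/cgroup/cpuacct/grp/cpuacct.usage_percpu   # per-cpu ns
 *      cat /sys/fs/cgroup/cpuacct/grp/cpuacct.stat           # user/system ticks
 *      echo 0 > /sys/fs/cgroup/cpuacct/grp/cpuacct.usage     # reset
 */
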
/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
        struct cpuacct *ca;
        int cpu;

        cpu = task_cpu(tsk);

        rcu_read_lock();

        ca = task_ca(tsk);

        while (true) {
                u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;

                ca = parent_ca(ca);
                if (!ca)
                        break;
        }

        rcu_read_unlock();
}

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *p, int index, u64 val)
{
        struct kernel_cpustat *kcpustat;
        struct cpuacct *ca;

        rcu_read_lock();
        ca = task_ca(p);
        while (ca != &root_cpuacct) {
                kcpustat = this_cpu_ptr(ca->cpustat);
                kcpustat->cpustat[index] += val;
                ca = __parent_ca(ca);
        }
        rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
        .name = "cpuacct",
        .css_alloc = cpuacct_css_alloc,
        .css_free = cpuacct_css_free,
        .subsys_id = cpuacct_subsys_id,
        .base_cftypes = files,
        .early_init = 1,
};