/*
 * kernel/cpu_acct.c - CPU accounting cgroup subsystem
 *
 * Copyright (C) Google Inc, 2006
 *
 * Developed by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com)
 *
 */

/*
 * Example cgroup subsystem for reporting total CPU usage of tasks in a
 * cgroup, along with percentage load over a time interval
 */
#include <linux/module.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>

#include <asm/div64.h>
  19. struct cpuacct {
  20. struct cgroup_subsys_state css;
  21. spinlock_t lock;
  22. /* total time used by this class */
  23. cputime64_t time;
  24. /* time when next load calculation occurs */
  25. u64 next_interval_check;
  26. /* time used in current period */
  27. cputime64_t current_interval_time;
  28. /* time used in last period */
  29. cputime64_t last_interval_time;
  30. };
  31. struct cgroup_subsys cpuacct_subsys;
  32. static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
  33. {
  34. return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
  35. struct cpuacct, css);
  36. }
  37. static inline struct cpuacct *task_ca(struct task_struct *task)
  38. {
  39. return container_of(task_subsys_state(task, cpuacct_subsys_id),
  40. struct cpuacct, css);
  41. }
  42. #define INTERVAL (HZ * 10)
  43. static inline u64 next_interval_boundary(u64 now)
  44. {
  45. /* calculate the next interval boundary beyond the
  46. * current time */
  47. do_div(now, INTERVAL);
  48. return (now + 1) * INTERVAL;
  49. }
  50. static struct cgroup_subsys_state *cpuacct_create(
  51. struct cgroup_subsys *ss, struct cgroup *cont)
  52. {
  53. struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
  54. if (!ca)
  55. return ERR_PTR(-ENOMEM);
  56. spin_lock_init(&ca->lock);
  57. ca->next_interval_check = next_interval_boundary(get_jiffies_64());
  58. return &ca->css;
  59. }
  60. static void cpuacct_destroy(struct cgroup_subsys *ss,
  61. struct cgroup *cont)
  62. {
  63. kfree(cgroup_ca(cont));
  64. }
  65. /* Lazily update the load calculation if necessary. Called with ca locked */
  66. static void cpuusage_update(struct cpuacct *ca)
  67. {
  68. u64 now = get_jiffies_64();
  69. /* If we're not due for an update, return */
  70. if (ca->next_interval_check > now)
  71. return;
  72. if (ca->next_interval_check <= (now - INTERVAL)) {
  73. /* If it's been more than an interval since the last
  74. * check, then catch up - the last interval must have
  75. * been zero load */
  76. ca->last_interval_time = 0;
  77. ca->next_interval_check = next_interval_boundary(now);
  78. } else {
  79. /* If a steal takes the last interval time negative,
  80. * then we just ignore it */
  81. if ((s64)ca->current_interval_time > 0)
  82. ca->last_interval_time = ca->current_interval_time;
  83. else
  84. ca->last_interval_time = 0;
  85. ca->next_interval_check += INTERVAL;
  86. }
  87. ca->current_interval_time = 0;
  88. }
  89. static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
  90. {
  91. struct cpuacct *ca = cgroup_ca(cont);
  92. u64 time;
  93. spin_lock_irq(&ca->lock);
  94. cpuusage_update(ca);
  95. time = cputime64_to_jiffies64(ca->time);
  96. spin_unlock_irq(&ca->lock);
  97. /* Convert 64-bit jiffies to seconds */
  98. time *= 1000;
  99. do_div(time, HZ);
  100. return time;
  101. }
  102. static u64 load_read(struct cgroup *cont, struct cftype *cft)
  103. {
  104. struct cpuacct *ca = cgroup_ca(cont);
  105. u64 time;
  106. /* Find the time used in the previous interval */
  107. spin_lock_irq(&ca->lock);
  108. cpuusage_update(ca);
  109. time = cputime64_to_jiffies64(ca->last_interval_time);
  110. spin_unlock_irq(&ca->lock);
  111. /* Convert time to a percentage, to give the load in the
  112. * previous period */
  113. time *= 100;
  114. do_div(time, INTERVAL);
  115. return time;
  116. }
  117. static struct cftype files[] = {
  118. {
  119. .name = "usage",
  120. .read_uint = cpuusage_read,
  121. },
  122. {
  123. .name = "load",
  124. .read_uint = load_read,
  125. }
  126. };
  127. static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
  128. {
  129. return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
  130. }
  131. void cpuacct_charge(struct task_struct *task, cputime_t cputime)
  132. {
  133. struct cpuacct *ca;
  134. unsigned long flags;
  135. if (!cpuacct_subsys.active)
  136. return;
  137. rcu_read_lock();
  138. ca = task_ca(task);
  139. if (ca) {
  140. spin_lock_irqsave(&ca->lock, flags);
  141. cpuusage_update(ca);
  142. ca->time = cputime64_add(ca->time, cputime);
  143. ca->current_interval_time =
  144. cputime64_add(ca->current_interval_time, cputime);
  145. spin_unlock_irqrestore(&ca->lock, flags);
  146. }
  147. rcu_read_unlock();
  148. }
  149. struct cgroup_subsys cpuacct_subsys = {
  150. .name = "cpuacct",
  151. .create = cpuacct_create,
  152. .destroy = cpuacct_destroy,
  153. .populate = cpuacct_populate,
  154. .subsys_id = cpuacct_subsys_id,
  155. };