res_counter.h

#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contains common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/spinlock.h>
#include <linux/errno.h>

/*
 * The core object. The cgroup that wishes to account for some
 * resource may include this counter into its structures and use
 * the helpers described below.
 */
struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the limit that usage can exceed (the "soft" limit)
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};
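
/*
 * Example (illustrative sketch, not part of this header): a controller
 * embeds a res_counter in its per-cgroup state and charges against it.
 * "struct foo_cgroup" below is a hypothetical controller structure:
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;
 *	};
 */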

#define RES_COUNTER_MAX ULLONG_MAX

/**
 * Helpers to interact with userspace
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter:	the counter in question
 * @member:	the field to work with (see RES_xxx below)
 * @buf:	the buffer to operate on,...
 * @nbytes:	its size...
 * @pos:	and the offset.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res);

/*
 * the field descriptors. one for each member of res_counter
 */
enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
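
/*
 * Example (illustrative, not part of this header): userspace reporting
 * typically forwards to res_counter_read_u64() with one of the RES_xxx
 * descriptors above, e.g. from a controller's cgroup file handler
 * ("foo" is the hypothetical controller from the sketch further up):
 *
 *	u64 usage = res_counter_read_u64(&foo->res, RES_USAGE);
 *	u64 limit = res_counter_read_u64(&foo->res, RES_LIMIT);
 */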

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * Returns 0 on success and <0 if counter->usage would exceed
 * counter->limit. The _locked call expects counter->lock to be taken.
 *
 * charge_nofail works the same, except that it charges the resource
 * counter unconditionally, and returns < 0 if, after the current
 * charge, we are over the limit.
 */
int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
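
/*
 * Example (illustrative sketch): charges propagate up the parent chain;
 * on failure *limit_fail_at points at the counter whose limit was hit,
 * which may be an ancestor of the counter that was charged, and that
 * counter's failcnt is bumped. "foo" and the -ENOMEM policy below are
 * hypothetical:
 *
 *	struct res_counter *fail_res;
 *	int ret;
 *
 *	ret = res_counter_charge(&foo->res, PAGE_SIZE, &fail_res);
 *	if (ret)
 *		return -ENOMEM;
 */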

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * These calls check for usage underflow and show a warning on the console.
 * The _locked call expects counter->lock to be taken.
 *
 * Returns the total charges still present in @counter.
 */
u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);

u64 res_counter_uncharge_until(struct res_counter *counter,
			       struct res_counter *top,
			       unsigned long val);
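
/*
 * Example (illustrative): every successful charge should eventually be
 * balanced by an uncharge of the same amount, e.g. when the hypothetical
 * foo_cgroup releases the resource it accounted above:
 *
 *	res_counter_uncharge(&foo->res, PAGE_SIZE);
 *
 * res_counter_uncharge_until() stops walking the hierarchy at @top,
 * e.g. when charges are being moved to an ancestor rather than dropped.
 */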

/**
 * res_counter_margin - calculate chargeable space of a counter
 * @cnt: the counter
 *
 * Returns the difference between the hard limit and the current usage
 * of resource counter @cnt.
 */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
	unsigned long long margin;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->limit > cnt->usage)
		margin = cnt->limit - cnt->usage;
	else
		margin = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return margin;
}
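
/*
 * Example (illustrative): a controller might consult the margin before
 * attempting a batched charge and fall back to a single unit when there
 * is not enough room ("foo" and the batch size are hypothetical):
 *
 *	unsigned long batch = 32 * PAGE_SIZE;
 *
 *	if (res_counter_margin(&foo->res) < batch)
 *		batch = PAGE_SIZE;
 */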

/**
 * res_counter_soft_limit_excess - get the difference between usage and
 * the soft limit
 * @cnt: the counter
 *
 * Returns 0 if usage is less than or equal to the soft limit;
 * otherwise, the difference between usage and the soft limit.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}
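
/*
 * Example (illustrative): the soft limit is advisory, so a controller
 * typically uses the excess to decide which groups to squeeze first
 * under pressure rather than to fail a charge ("foo" and
 * foo_try_to_reclaim() are hypothetical):
 *
 *	if (res_counter_soft_limit_excess(&foo->res))
 *		foo_try_to_reclaim(foo);
 */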

/* reset the max_usage watermark back to the current usage */
static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

/* clear the count of failed charge attempts */
static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

/* set a new hard limit; fails with -EBUSY if current usage already exceeds it */
static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
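
/*
 * Example (illustrative): because res_counter_set_limit() refuses to set
 * a limit below the current usage, a controller usually tries to shrink
 * usage and retries a bounded number of times ("foo", "new_limit" and
 * foo_reclaim() are hypothetical):
 *
 *	int retries = 5;
 *	int ret;
 *
 *	while ((ret = res_counter_set_limit(&foo->res, new_limit)) == -EBUSY) {
 *		if (!retries-- || !foo_reclaim(foo))
 *			break;
 *	}
 */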

static inline int
res_counter_set_soft_limit(struct res_counter *cnt,
				unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return 0;
}

#endif /* __RES_COUNTER_H__ */