res_counter.c

/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
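
/*
 * Initialize a counter: set up the spinlock and start with an effectively
 * unlimited limit (LLONG_MAX). usage and failcnt are expected to start at
 * zero, typically because the containing structure was zero-allocated.
 */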
void res_counter_init(struct res_counter *counter)
{
        spin_lock_init(&counter->lock);
        counter->limit = (unsigned long long)LLONG_MAX;
}
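
/*
 * Charge @val against the counter with counter->lock already held.
 * If the new usage would exceed the limit, the charge is rejected with
 * -ENOMEM and failcnt is incremented.
 */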
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
        if (counter->usage + val > counter->limit) {
                counter->failcnt++;
                return -ENOMEM;
        }

        counter->usage += val;
        return 0;
}
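
/*
 * Charge @val, taking counter->lock with interrupts disabled so the
 * counter can be charged from any context.
 */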
int res_counter_charge(struct res_counter *counter, unsigned long val)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&counter->lock, flags);
        ret = res_counter_charge_locked(counter, val);
        spin_unlock_irqrestore(&counter->lock, flags);
        return ret;
}
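
/*
 * Uncharge @val with counter->lock already held. Uncharging more than
 * the current usage indicates a bookkeeping bug: warn and clamp @val so
 * usage does not underflow.
 */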
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
        if (WARN_ON(counter->usage < val))
                val = counter->usage;

        counter->usage -= val;
}
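
/*
 * Locked wrapper around res_counter_uncharge_locked().
 */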
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
        unsigned long flags;

        spin_lock_irqsave(&counter->lock, flags);
        res_counter_uncharge_locked(counter, val);
        spin_unlock_irqrestore(&counter->lock, flags);
}
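
/*
 * Map a RES_* member id to the corresponding field of the counter.
 * An unknown member id is a programming error, hence BUG().
 */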
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
        switch (member) {
        case RES_USAGE:
                return &counter->usage;
        case RES_LIMIT:
                return &counter->limit;
        case RES_FAILCNT:
                return &counter->failcnt;
        }

        BUG();
        return NULL;
}
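
/*
 * Format the selected member for a userspace read(). An optional
 * read_strategy callback formats the value into the buffer; otherwise it
 * is printed as plain decimal. The result is copied to userspace with
 * simple_read_from_buffer().
 */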
ssize_t res_counter_read(struct res_counter *counter, int member,
                const char __user *userbuf, size_t nbytes, loff_t *pos,
                int (*read_strategy)(unsigned long long val, char *st_buf))
{
        unsigned long long *val;
        char buf[64], *s;

        s = buf;
        val = res_counter_member(counter, member);
        if (read_strategy)
                s += read_strategy(*val, s);
        else
                s += sprintf(s, "%llu\n", *val);
        return simple_read_from_buffer((void __user *)userbuf, nbytes,
                        pos, buf, s - buf);
}
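
/*
 * Return the raw value of a member, for use with the cgroup read_u64
 * file interface.
 */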
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
        return *res_counter_member(counter, member);
}
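
/*
 * Parse a value written from userspace and store it into the selected
 * member. An optional write_strategy callback may parse the string
 * itself; otherwise it must be a plain decimal number. The update is
 * made under counter->lock.
 */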
ssize_t res_counter_write(struct res_counter *counter, int member,
                const char __user *userbuf, size_t nbytes, loff_t *pos,
                int (*write_strategy)(char *st_buf, unsigned long long *val))
{
        int ret;
        char *buf, *end;
        unsigned long flags;
        unsigned long long tmp, *val;

        buf = kmalloc(nbytes + 1, GFP_KERNEL);
        ret = -ENOMEM;
        if (buf == NULL)
                goto out;

        buf[nbytes] = '\0';
        ret = -EFAULT;
        if (copy_from_user(buf, userbuf, nbytes))
                goto out_free;

        ret = -EINVAL;

        strstrip(buf);
        if (write_strategy) {
                if (write_strategy(buf, &tmp))
                        goto out_free;
        } else {
                tmp = simple_strtoull(buf, &end, 10);
                if (*end != '\0')
                        goto out_free;
        }
        spin_lock_irqsave(&counter->lock, flags);
        val = res_counter_member(counter, member);
        *val = tmp;
        spin_unlock_irqrestore(&counter->lock, flags);
        ret = nbytes;
out_free:
        kfree(buf);
out:
        return ret;
}
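
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * resource controller embeds a struct res_counter, charges it when a
 * resource is allocated and uncharges it when the resource is freed.
 * res_counter_charge() returns 0 on success or -ENOMEM when the limit
 * would be exceeded. The names my_ctrl, my_ctrl_charge and
 * my_ctrl_uncharge below are hypothetical.
 *
 *      struct my_ctrl {
 *              struct res_counter res;
 *      };
 *
 *      static int my_ctrl_charge(struct my_ctrl *ctrl, unsigned long size)
 *      {
 *              return res_counter_charge(&ctrl->res, size);
 *      }
 *
 *      static void my_ctrl_uncharge(struct my_ctrl *ctrl, unsigned long size)
 *      {
 *              res_counter_uncharge(&ctrl->res, size);
 *      }
 */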