hugetlb_cgroup.c

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for hugepages from hugetlb.
         */
        struct res_counter hugepage[HUGE_MAX_HSTATE];
};
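
/*
 * Each per-hstate control file packs two values into cftype->private:
 * the hstate index in the upper 16 bits and the RES_* attribute of the
 * res_counter in the lower 16 bits.
 */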
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)

struct cgroup_subsys hugetlb_subsys __read_mostly;
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
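
/*
 * Lookup helpers mapping a cgroup_subsys_state, a cgroup, or a task to
 * the hugetlb_cgroup that contains it.
 */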
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return container_of(s, struct hugetlb_cgroup, css);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
{
        return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
                                                           hugetlb_subsys_id));
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_subsys_state(task,
                                                         hugetlb_subsys_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
{
        if (!cg->parent)
                return NULL;
        return hugetlb_cgroup_from_cgroup(cg->parent);
}
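
/*
 * Returns true if any hstate still has a non-zero usage charged to
 * this cgroup; pre_destroy keeps moving charges to the parent until
 * this goes false.
 */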
static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
{
        int idx;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);

        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
                        return true;
        }
        return false;
}
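
/*
 * Allocate a new hugetlb_cgroup and parent each per-hstate counter to
 * the corresponding counter of the parent cgroup, so limits apply
 * hierarchically. The first, parentless instance becomes the root.
 */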
static struct cgroup_subsys_state *hugetlb_cgroup_create(struct cgroup *cgroup)
{
        int idx;
        struct cgroup *parent_cgroup;
        struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;

        h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        parent_cgroup = cgroup->parent;
        if (parent_cgroup) {
                parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx],
                                         &parent_h_cgroup->hugepage[idx]);
        } else {
                root_h_cgroup = h_cgroup;
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx], NULL);
        }
        return &h_cgroup->css;
}
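
/* Free the hugetlb_cgroup; pre_destroy has already drained its charges. */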
static void hugetlb_cgroup_destroy(struct cgroup *cgroup)
{
        struct hugetlb_cgroup *h_cgroup;

        h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
        kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
                                       struct page *page)
{
        int csize;
        struct res_counter *counter;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);

        page_hcg = hugetlb_cgroup_from_page(page);
        /*
         * We can have pages on the active list without any cgroup,
         * i.e., hugepages with fewer than 3 pages. We can safely
         * ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        csize = PAGE_SIZE << compound_order(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                res_counter_charge_nofail(&parent->hugepage[idx],
                                          csize, &fail_res);
        }
        counter = &h_cg->hugepage[idx];
        res_counter_uncharge_until(counter, counter->parent, csize);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
{
        struct hstate *h;
        struct page *page;
        int ret = 0, idx;

        do {
                if (cgroup_task_count(cgroup) ||
                    !list_empty(&cgroup->children)) {
                        ret = -EBUSY;
                        goto out;
                }
                /* restart the hstate index on every pass of the loop */
                idx = 0;
                for_each_hstate(h) {
                        spin_lock(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(idx, cgroup, page);

                        spin_unlock(&hugetlb_lock);
                        idx++;
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(cgroup));
out:
        return ret;
}
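
/*
 * Charge nr_pages worth of hugetlb pages against the current task's
 * cgroup. css_tryget() fails if that cgroup is concurrently being torn
 * down, in which case we retry and re-read the task's (possibly new)
 * cgroup under rcu_read_lock(). On success *ptr carries the charged
 * cgroup for a later commit or uncharge.
 */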
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        int ret = 0;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *h_cg = NULL;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
        css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        set_hugetlb_cgroup(page, h_cg);
        return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        struct hugetlb_cgroup *h_cg;
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled())
                return;
        VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
        h_cg = hugetlb_cgroup_from_page(page);
        if (unlikely(!h_cg))
                return;
        set_hugetlb_cgroup(page, NULL);
        res_counter_uncharge(&h_cg->hugepage[idx], csize);
        return;
}
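
/*
 * Drop a charge taken by hugetlb_cgroup_charge_cgroup() when the page
 * was never committed, e.g. because the allocation failed afterwards.
 */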
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        unsigned long csize = nr_pages * PAGE_SIZE;

        if (hugetlb_cgroup_disabled() || !h_cg)
                return;
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;
        res_counter_uncharge(&h_cg->hugepage[idx], csize);
        return;
}
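
/*
 * Read handler shared by all the control files: decode the hstate
 * index and RES_* attribute from cft->private and report the counter
 * value in bytes.
 */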
static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
                                   struct file *file, char __user *buf,
                                   size_t nbytes, loff_t *ppos)
{
        u64 val;
        char str[64];
        int idx, name, len;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);

        val = res_counter_read_u64(&h_cg->hugepage[idx], name);
        len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
        return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
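
/*
 * Write handler for the limit files. Only RES_LIMIT is writable, and
 * never on the root cgroup; the value is parsed with memparse()
 * semantics, so suffixes such as K, M and G are accepted.
 */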
static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
                                const char *buffer)
{
        int idx, name, ret;
        unsigned long long val;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);

        switch (name) {
        case RES_LIMIT:
                if (hugetlb_cgroup_is_root(h_cg)) {
                        /* Can't set limit on root */
                        ret = -EINVAL;
                        break;
                }
                /* This function does all the necessary parsing; reuse it */
                ret = res_counter_memparse_write_strategy(buffer, &val);
                if (ret)
                        break;
                ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
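
/*
 * Trigger handler for the max_usage and failcnt files: any write
 * resets the corresponding res_counter statistic.
 */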
static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
{
        int idx, name, ret = 0;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

        idx = MEMFILE_IDX(event);
        name = MEMFILE_ATTR(event);

        switch (name) {
        case RES_MAX_USAGE:
                res_counter_reset_max(&h_cg->hugepage[idx]);
                break;
        case RES_FAILCNT:
                res_counter_reset_failcnt(&h_cg->hugepage[idx]);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
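
/* Format a hugepage size as a human-readable string, e.g. "2MB". */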
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= (1UL << 30))
                snprintf(buf, size, "%luGB", hsize >> 30);
        else if (hsize >= (1UL << 20))
                snprintf(buf, size, "%luMB", hsize >> 20);
        else
                snprintf(buf, size, "%luKB", hsize >> 10);
        return buf;
}
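
/*
 * Register the four control files for one hstate, named after its page
 * size: e.g. hugetlb.2MB.limit_in_bytes, hugetlb.2MB.usage_in_bytes,
 * hugetlb.2MB.max_usage_in_bytes and hugetlb.2MB.failcnt. The fifth,
 * zeroed cftype entry terminates the array.
 */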
int __init hugetlb_cgroup_file_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, 32, huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read = hugetlb_cgroup_read;
        cft->write_string = hugetlb_cgroup_write;

        /* Add the usage file */
        cft = &h->cgroup_files[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read = hugetlb_cgroup_read;

        /* Add the MAX usage file */
        cft = &h->cgroup_files[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->trigger = hugetlb_cgroup_reset;
        cft->read = hugetlb_cgroup_read;

        /* Add the failcnt file */
        cft = &h->cgroup_files[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->trigger = hugetlb_cgroup_reset;
        cft->read = hugetlb_cgroup_read;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files[4];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));

        return 0;
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hstate *h = page_hstate(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        VM_BUG_ON(!PageHuge(oldhpage));
        spin_lock(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_page(oldhpage);
        set_hugetlb_cgroup(oldhpage, NULL);

        /* move the h_cg details to new cgroup */
        set_hugetlb_cgroup(newhpage, h_cg);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        return;
}
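
/* The subsystem descriptor wiring the callbacks above into the cgroup core. */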
struct cgroup_subsys hugetlb_subsys = {
        .name = "hugetlb",
        .create = hugetlb_cgroup_create,
        .pre_destroy = hugetlb_cgroup_pre_destroy,
        .destroy = hugetlb_cgroup_destroy,
        .subsys_id = hugetlb_subsys_id,
};