hugetlb_cgroup.c

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};
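
/*
 * A cftype's ->private field packs both the hstate index (upper 16 bits)
 * and the res_counter member being accessed (lower 16 bits, e.g. RES_LIMIT
 * or RES_USAGE), so a single set of read/write/trigger handlers below can
 * serve every huge page size.
 */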
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

struct cgroup_subsys hugetlb_subsys __read_mostly;
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return container_of(s, struct hugetlb_cgroup, css);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
{
	return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
							   hugetlb_subsys_id));
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_subsys_state(task,
							 hugetlb_subsys_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
{
	if (!cg->parent)
		return NULL;
	return hugetlb_cgroup_from_cgroup(cg->parent);
}
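
/*
 * Returns true while any hstate counter in this cgroup still has charged
 * pages; css_offline() below uses this to decide whether another
 * reparenting pass over the active lists is needed.
 */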
static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
{
	int idx;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
			return true;
	}
	return false;
}
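
/*
 * A child cgroup's counters are initialized with the parent's counters as
 * their hierarchy parents, so charges propagate upward; the root cgroup's
 * counters have no parent and therefore no limit.
 */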
static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
{
	int idx;
	struct cgroup *parent_cgroup;
	struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	parent_cgroup = cgroup->parent;
	if (parent_cgroup) {
		parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference or test whether the page is active here.
 * This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e. hugepages with fewer than 3 pages. We can safely ignore
	 * those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
{
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		/* idx is the hstate index; reset it on every pass */
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, cgroup, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(cgroup));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
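
/*
 * A minimal sketch of the expected call sequence (the in-tree caller is
 * alloc_huge_page() in mm/hugetlb.c): charge the current task's cgroup
 * first, then bind the charge to a page under hugetlb_lock.
 *
 *	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 *	if (ret)
 *		return ERR_PTR(-ENOSPC);
 *	... allocate the huge page ...
 *	spin_lock(&hugetlb_lock);
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 *	spin_unlock(&hugetlb_lock);
 */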

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}
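
/*
 * Unwind path for a charge that was never committed to a page, e.g. when
 * the huge page allocation fails after hugetlb_cgroup_charge_cgroup() has
 * already succeeded.
 */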
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}

static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
				   struct file *file, char __user *buf,
				   size_t nbytes, loff_t *ppos)
{
	u64 val;
	char str[64];
	int idx, name, len;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	val = res_counter_read_u64(&h_cg->hugepage[idx], name);
	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}

static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
				const char *buffer)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
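
/*
 * Format a huge page size as a human-readable GB/MB/KB string; the result
 * becomes the middle component of the per-hstate control file names.
 */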
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
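
/*
 * Called once per hstate during boot, so each huge page size gets its own
 * set of control files; since the cgroup core prefixes the subsystem name,
 * a 2 MB hstate is exposed as, e.g., hugetlb.2MB.limit_in_bytes.
 */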
int __init hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read = hugetlb_cgroup_read;
	cft->write_string = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read = hugetlb_cgroup_read;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read = hugetlb_cgroup_read;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read = hugetlb_cgroup_read;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));

	return 0;
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON(!PageHuge(oldhpage));
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

struct cgroup_subsys hugetlb_subsys = {
	.name		= "hugetlb",
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.subsys_id	= hugetlb_subsys_id,
};