/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory itself but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field would therefore do, but having a rule avoids ambiguity: a charge
 * function's gfp_mask should be set to either GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is sane.)
 */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
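/*
 * Illustrative sketch (not part of this header): charging a freshly
 * allocated anonymous page per the gfp_mask rule above; my_anon_fault()
 * is a hypothetical caller, not a kernel function.
 *
 *	static int my_anon_fault(struct mm_struct *mm, struct page *page)
 *	{
 *		return mem_cgroup_newpage_charge(page, mm, GFP_KERNEL);
 *	}
 */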
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
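/*
 * Illustrative sketch of the try/commit/cancel protocol, loosely modeled
 * on a swap-in fault path; my_swapin() and my_map_page() are hypothetical:
 *
 *	static int my_swapin(struct mm_struct *mm, struct page *page)
 *	{
 *		struct mem_cgroup *memcg;
 *
 *		if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *			return -ENOMEM;
 *		if (my_map_page(mm, page)) {
 *			mem_cgroup_cancel_charge_swapin(memcg);
 *			return -EFAULT;
 *		}
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *		return 0;
 *	}
 */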
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);
/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
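/*
 * Illustrative sketch of coalesced uncharging: bracketing a batch of page
 * releases with uncharge_start()/uncharge_end() lets memcg merge the
 * per-page counter updates; my_release_pages() is hypothetical:
 *
 *	static void my_release_pages(struct page **pages, int nr)
 *	{
 *		int i;
 *
 *		mem_cgroup_uncharge_start();
 *		for (i = 0; i < nr; i++)
 *			mem_cgroup_uncharge_page(pages[i]);
 *		mem_cgroup_uncharge_end();
 *	}
 */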
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	int match;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	match = __mem_cgroup_same_or_subtree(cgroup, memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
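/*
 * Illustrative sketch of the migration pair: prepare before moving the
 * page contents, then report success or failure so the charge ends up on
 * the right page; my_migrate() and my_move_mapping() are hypothetical:
 *
 *	static int my_migrate(struct page *page, struct page *newpage)
 *	{
 *		struct mem_cgroup *memcg;
 *		int rc;
 *
 *		if (mem_cgroup_prepare_migration(page, newpage, &memcg,
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *		rc = my_move_mapping(page, newpage);
 *		mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 *		return rc;
 *	}
 */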
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
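/*
 * Illustrative sketch of hierarchy iteration: visit every memcg in root's
 * subtree, and on early exit call mem_cgroup_iter_break() to drop the
 * reference held on the last visited memcg; my_visit() is a hypothetical
 * predicate:
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (my_visit(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */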
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
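/*
 * Illustrative sketch of the page-stat protocol above: an update must be
 * bracketed by begin/end so it cannot race with a charge moving between
 * cgroups; my_account_mapped() is hypothetical:
 *
 *	static void my_account_mapped(struct page *page)
 *	{
 *		bool locked;
 *		unsigned long flags;
 *
 *		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 *	}
 */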
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}

static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */