/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
struct memcg_scanrecord {
	struct mem_cgroup *mem;	/* scanned memory cgroup */
	struct mem_cgroup *root; /* scan target hierarchy root */
	int context;		/* scanning context (see memcontrol.c) */
	unsigned long nr_scanned[2]; /* the number of scanned pages */
	unsigned long nr_rotated[2]; /* the number of rotated pages */
	unsigned long nr_freed[2]; /* the number of freed pages */
	unsigned long elapsed;	/* nsec of time elapsed while scanning */
};
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning here. Any bits
 * of that field would work, but having a rule is better: a charge function's
 * gfp_mask should be set to GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK) to
 * keep callers unambiguous.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
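
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller charging a newly faulted anonymous page. It passes GFP_KERNEL
 * directly, following the rule above; callers that already hold a mask
 * would pass (gfp_mask & GFP_RECLAIM_MASK) instead.
 */
static inline int example_charge_new_page(struct page *page,
					struct mm_struct *mm)
{
	/* The "where I want memory from" bits of the mask are ignored. */
	return mem_cgroup_newpage_charge(page, mm, GFP_KERNEL);
}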
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				enum lru_list from, enum lru_list to);
/* For coalescing uncharges, to reduce memcg's overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
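
/*
 * Illustrative sketch, not part of the original header: bracketing a run
 * of uncharges with start/end lets memcg coalesce the per-page counter
 * updates into one batch instead of paying for each page individually.
 */
static inline void example_uncharge_batch(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);
	mem_cgroup_uncharge_end();
}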
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
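
/*
 * Test whether the task owning @mm is a member of @cgroup. The mm->owner
 * pointer is sampled under rcu_read_lock().
 */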
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap,
						  struct memcg_scanrecord *rec);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						struct memcg_scanrecord *rec,
						unsigned long *nr_scanned);
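
/*
 * Illustrative sketch, not part of the original header: reclaim from a
 * memcg while collecting scan statistics in a memcg_scanrecord. The
 * context values are defined in memcontrol.c; a zero-initialized record
 * with mem/root filled in is the minimal setup assumed here.
 */
static inline unsigned long example_reclaim_with_stats(struct mem_cgroup *mem)
{
	struct memcg_scanrecord rec = {
		.mem	= mem,
		.root	= mem,	/* scan the hierarchy rooted at @mem */
	};

	return try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, false, &rec);
}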
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
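
/*
 * Illustrative sketch, not part of the original header: rmap-style
 * accounting bumps MEMCG_NR_FILE_MAPPED when a file page gains its first
 * mapping and drops it again when the last mapping goes away.
 */
static inline void example_account_file_mapped(struct page *page, bool mapped)
{
	if (mapped)
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	else
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}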
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
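
/*
 * Illustrative sketch, not part of the original header: per-memcg event
 * accounting is meant to sit next to the global counters, e.g. charging
 * a major fault to the faulting mm's cgroup alongside count_vm_event().
 */
static inline void example_count_majfault(struct mm_struct *mm)
{
	mem_cgroup_count_vm_event(mm, PGMAJFAULT);
}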
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}
static inline void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page,
					      enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}
static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */