/* page_cgroup.h (4.5 KB) */
  1. #ifndef __LINUX_PAGE_CGROUP_H
  2. #define __LINUX_PAGE_CGROUP_H
  3. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  4. #include <linux/bit_spinlock.h>
/*
 * Page Cgroup can be considered as an extended mem_map.
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 * All page cgroups are allocated at boot or at memory hotplug events,
 * so the page cgroup for a pfn always exists.
 */
/*
 * Per-page tracking state for the memory controller.  One instance is
 * associated with every struct page (allocated at boot or memory hotplug,
 * see the file header comment).
 */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bit flags; PCG_LOCK guards
					 * mem_cgroup and the bits after it */
	struct mem_cgroup *mem_cgroup;	/* cgroup this page is charged to */
	struct page *page;		/* the page this entry describes */
	struct list_head lru;		/* per cgroup LRU list */
};
/* Per-node setup of page_cgroup storage, called during pgdat init. */
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
/* With SPARSEMEM the flatmem initializer is a no-op stub. */
static inline void __init page_cgroup_init_flatmem(void)
{
}
extern void __init page_cgroup_init(void);
#else
/* Without SPARSEMEM only the flatmem initializer does real work. */
void __init page_cgroup_init_flatmem(void);
static inline void __init page_cgroup_init(void)
{
}
#endif

/*
 * Map a struct page to its page_cgroup.  Per the file header, the
 * page_cgroup for a pfn always exists once boot/hotplug init has run.
 */
struct page_cgroup *lookup_page_cgroup(struct page *page);
/*
 * Bit numbers within page_cgroup->flags.  Do not reorder: the values are
 * bit positions used by the PCG accessor macros and bit_spin_lock below.
 */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,	/* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE,	/* charged as cache */
	PCG_USED,	/* this object is in use. */
	PCG_MIGRATION,	/* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK,	/* For race between move_account v.s. following bits */
	PCG_FILE_MAPPED,	/* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU,	/* page has been accounted for (under lru_lock) */
};
/*
 * Macro generators for per-flag accessors, mirroring the page-flag
 * helpers in the core mm.  E.g. TESTPCGFLAG(Used, USED) emits
 * PageCgroupUsed(pc), which tests PCG_USED in pc->flags.
 */
#define TESTPCGFLAG(uname, lname) \
static inline int PageCgroup##uname(struct page_cgroup *pc) \
{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname) \
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
{ set_bit(PCG_##lname, &pc->flags); }

#define CLEARPCGFLAG(uname, lname) \
static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
{ clear_bit(PCG_##lname, &pc->flags); }

#define TESTCLEARPCGFLAG(uname, lname) \
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

/* USED: the page_cgroup is charged / in use. */
TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

/* ACCT_LRU: LRU accounting done; per the enum, serialized by lru_lock. */
SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

/* FILE_MAPPED: page is accounted as "mapped". */
SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

/* MIGRATION: page is under page migration. */
SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
/* Take the per-page_cgroup bit spinlock (PCG_LOCK in pc->flags). */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
/* Release the lock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
  84. static inline int page_is_cgroup_locked(struct page_cgroup *pc)
  85. {
  86. return bit_spin_is_locked(PCG_LOCK, &pc->flags);
  87. }
/*
 * Take PCG_MOVE_LOCK with IRQs disabled; the saved IRQ state is returned
 * through *flags for the matching move_unlock_page_cgroup().
 */
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * We know updates to pc->flags of page cache's stats are from both of
	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}
/*
 * Release PCG_MOVE_LOCK and restore the IRQ state saved by
 * move_lock_page_cgroup().  Unlock must precede the IRQ restore.
 */
static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}
#else /* CONFIG_CGROUP_MEM_RES_CTLR */

/* Memory controller disabled: keep the type opaque, stub out the hooks. */
struct page_cgroup;

static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

/* No page_cgroup exists in this configuration; callers get NULL. */
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}

static inline void page_cgroup_init(void)
{
}

static inline void __init page_cgroup_init_flatmem(void)
{
}
#endif
#include <linux/swap.h>

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * Swap-entry accounting: each swap entry can carry an unsigned short
 * cgroup id.  record/cmpxchg return the previous id; lookup returns the
 * current one.  swapon/swapoff manage the backing storage per swap type.
 */
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
	unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else

/* Swap accounting disabled: stubs report id 0 and always succeed. */
static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}

static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}

static inline int
swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}

static inline void swap_cgroup_swapoff(int type)
{
	return;
}
#endif
  149. #endif