rmap.h

#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct mutex mutex;		/* Serialize access to vma list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
};
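
/*
 * Illustrative sketch (editor's addition, not part of this header):
 * how an anonymous page reaches its anon_vma.  page->mapping holds
 * the anon_vma pointer with PAGE_MAPPING_ANON set in the low bits;
 * page_anon_vma() below checks the full PAGE_MAPPING_FLAGS field so
 * that KSM pages (which also set PAGE_MAPPING_KSM) are rejected.
 * Compiled out with #if 0; example only.
 */
#if 0
static struct anon_vma *example_decode_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (!(mapping & PAGE_MAPPING_ANON))
		return NULL;	/* file-backed page, or not mapped anon */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
#endif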
/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->mutex */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
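
/*
 * Illustrative sketch (editor's addition): one direction of the
 * VMA <-> anon_vma many-to-many relation.  Each anon_vma_chain sits
 * on the vma->anon_vma_chain list (via same_vma) and in one
 * anon_vma's interval tree (via rb).  Compiled out with #if 0.
 */
#if 0
static int example_count_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;
	int n = 0;

	/* needs mmap_sem held to keep the same_vma list stable */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		n++;	/* avc->anon_vma may hold pages of this VMA */
	return n;
}
#endif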
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
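
/*
 * Illustrative sketch (editor's addition): the low byte selects
 * exactly one action (recovered with TTU_ACTION(), defined below),
 * while bits 8 and up are independent modifiers, so callers compose
 * a mode with |.  This combination is the one page migration uses.
 * Compiled out with #if 0.
 */
#if 0
static int example_unmap_for_migration(struct page *page)
{
	/* migrate mode, and don't let mlock stop the unmap */
	return try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK);
}
#endif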
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
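
/*
 * Illustrative sketch (editor's addition): pinning an anon_vma across
 * a region where the VMA itself may disappear, per the refcount
 * comment in struct anon_vma above.  page_get_anon_vma() (declared
 * below) is the usual way to take such a reference from a page.
 * Compiled out with #if 0.
 */
#if 0
static void example_pin_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;			/* page is not (or no longer) anon */
	/* ... safe to sleep here; VMAs may come and go ... */
	put_anon_vma(anon_vma);		/* frees it if we were the last user */
}
#endif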
static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	mutex_unlock(&anon_vma->root->mutex);
}
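
/*
 * Illustrative sketch (editor's addition): all the lock helpers above
 * take root->mutex, not the anon_vma's own mutex.  anon_vmas created
 * by forking or splitting from a common ancestor share one root, so
 * locking any member serializes against the whole group.  Compiled
 * out with #if 0; example only.
 */
#if 0
static void example_shared_root_lock(struct anon_vma *child)
{
	/* if child was forked from some parent, child->root == parent->root */
	anon_vma_lock(child);	/* also excludes writers of the parent's tree */
	/* ... rb_root of every anon_vma sharing this root is now stable ... */
	anon_vma_unlock(child);
}
#endif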
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}
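
/*
 * Illustrative sketch (editor's addition): page_dup_rmap() is the
 * fork-time counterpart of page_add_anon_rmap().  When copy_page_range()
 * shares an anon page between parent and child it only needs to bump
 * _mapcount, since the page already has an anon_vma.  Simplified flow
 * below; compiled out with #if 0.
 */
#if 0
static void example_share_page_at_fork(struct mm_struct *dst_mm,
				       unsigned long addr, pte_t *dst_pte,
				       pte_t pte, struct page *page)
{
	page_dup_rmap(page);			/* one more PTE maps this page */
	set_pte_at(dst_mm, addr, dst_pte, pte);	/* install it in the child */
}
#endif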
/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
		     unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
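
/*
 * Illustrative sketch (editor's addition): the usual calling pattern
 * for page_check_address().  On success the PTE is kmapped and *ptlp
 * is held, so the caller must release both with pte_unmap_unlock().
 * Compiled out with #if 0; example only.
 */
#if 0
static int example_test_young(struct page *page, struct mm_struct *mm,
			      unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;
	int young;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;		/* page not mapped at this address */
	young = pte_young(*pte);
	pte_unmap_unlock(pte, ptl);	/* drop PTE mapping and lock */
	return young;
}
#endif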
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
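
/*
 * Illustrative sketch (editor's addition): a minimal callback matching
 * the rmap_one signature above.  rmap_walk() invokes it once per VMA
 * that maps the page, and stops early if the callback returns anything
 * other than SWAP_AGAIN.  Compiled out with #if 0.
 */
#if 0
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	int *nr_vmas = arg;	/* caller-supplied cookie */

	(*nr_vmas)++;
	return SWAP_AGAIN;	/* keep walking */
}

static int example_count_mappings(struct page *page)
{
	int nr_vmas = 0;

	rmap_walk(page, example_rmap_one, &nr_vmas);
	return nr_vmas;
}
#endif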
#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
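
/*
 * Illustrative sketch (editor's addition): how a reclaim-like caller
 * dispatches on these values, loosely modelled on shrink_page_list()
 * in mm/vmscan.c.  The error codes chosen here are for illustration
 * only.  Compiled out with #if 0.
 */
#if 0
static int example_reclaim_one(struct page *page)
{
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_FAIL:
		return -EBUSY;	/* give up on this page */
	case SWAP_AGAIN:
		return -EAGAIN;	/* retry on a later pass */
	case SWAP_MLOCK:
		return -EPERM;	/* mlocked: cull to the unevictable list */
	case SWAP_SUCCESS:
		return 0;	/* all PTEs gone, page can be reclaimed */
	}
	return -EINVAL;
}
#endif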
#endif	/* _LINUX_RMAP_H */