#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release
         */
        atomic_t refcount;

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid rb_root.rb_node pointer. The LSB bit
         * itself is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct rb_root rb_root; /* Interval tree of private "related" vmas */
};
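
/*
 * Illustrative sketch, not part of this header: the refcount protocol
 * above is what page_get_anon_vma() and put_anon_vma() below implement.
 * A caller that cannot guarantee the vma stays around pins the anon_vma
 * first:
 *
 *      struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *      if (anon_vma) {
 *              ... use anon_vma, safe against it being freed under us ...
 *              put_anon_vma(anon_vma);         the last user frees it here
 *      }
 */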

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};
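
/*
 * Illustrative sketch, not part of this header: a vma reaches all of its
 * anon_vmas through the chains on its anon_vma_chain list, in the way
 * unlink_anon_vmas() in mm/rmap.c walks them:
 *
 *      struct anon_vma_chain *avc;
 *
 *      list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *              ... avc->anon_vma is one anon_vma this vma is linked to ...
 */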

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
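
/*
 * Illustrative sketch, not part of this header: one action from the low
 * byte may be combined with the modifier bits above; page migration, for
 * example, unmaps a page while ignoring mlock and reference bits:
 *
 *      try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *                              TTU_IGNORE_ACCESS);
 */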

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}
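
/*
 * Illustrative note, not part of this header: for an anonymous page,
 * page->mapping holds the anon_vma pointer with PAGE_MAPPING_ANON set in
 * its low bits, so page_rmapping() recovers the pointer with roughly:
 *
 *      (struct anon_vma *)
 *              ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS)
 *
 * File pages fail the test above and make page_anon_vma() return NULL.
 */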

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                down_write(&anon_vma->root->rwsem);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}
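
/*
 * Illustrative note, not part of this header: all of the helpers above
 * take anon_vma->root->rwsem, so a whole anon_vma tree is serialized by
 * the one rwsem in its root. A reader walking the interval tree does:
 *
 *      anon_vma_lock_read(anon_vma);
 *      ... vmas cannot be linked or unlinked while we walk rb_root ...
 *      anon_vma_unlock_read(anon_vma);
 */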

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}
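
/*
 * Illustrative sketch, not part of this header: an anonymous fault
 * handler makes sure the vma has an anon_vma, then hooks the new page
 * into the reverse map, roughly as do_anonymous_page() does:
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      page = alloc_zeroed_user_highpage_movable(vma, address);
 *      ...
 *      page_add_new_anon_rmap(page, vma, address);
 *
 * The matching teardown when a pte goes away is page_remove_rmap().
 */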

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
                        unsigned long address, unsigned int *mapcount,
                        unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                     unsigned long address, enum ttu_flags flags);
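
/*
 * Illustrative sketch, not part of this header: reclaim asks whether a
 * page was recently referenced before deciding to unmap it; memcg below
 * stands for the target cgroup, or NULL for a global walk:
 *
 *      unsigned long vm_flags;
 *
 *      if (page_referenced(page, 1, memcg, &vm_flags))
 *              ... recently used, keep the page on its LRU list ...
 *      else
 *              ... candidate for try_to_unmap() and pageout ...
 */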

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                            unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;
        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}
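
/*
 * Illustrative sketch, not part of this header: on success the caller is
 * left holding the returned pte mapped with its page table lock held,
 * and must drop both with pte_unmap_unlock():
 *
 *      spinlock_t *ptl;
 *      pte_t *pte;
 *
 *      pte = page_check_address(page, mm, address, &ptl, 0);
 *      if (!pte)
 *              return;                 page not mapped at address
 *      ... examine or modify the pte ...
 *      pte_unmap_unlock(pte, ptl);
 */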

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
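
/*
 * Illustrative sketch, not part of this header: writeback uses this to
 * write-protect a file page before writing it out, so a later store
 * faults and re-dirties the page; compare clear_page_dirty_for_io():
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 */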

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
              struct vm_area_struct *, unsigned long, void *), void *arg);
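
/*
 * Illustrative sketch, not part of this header: rmap_walk() calls the
 * callback for each vma currently mapping the page, and keeps walking as
 * long as the callback returns SWAP_AGAIN. The callback name below is
 * hypothetical:
 *
 *      static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *                             unsigned long addr, void *arg)
 *      {
 *              ... handle one mapping of page, at addr inside vma ...
 *              return SWAP_AGAIN;              keep walking
 *      }
 *
 *      rmap_walk(page, my_rmap_one, NULL);
 */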

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
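
/*
 * Illustrative sketch, not part of this header: reclaim dispatches on
 * these values after trying to unmap a page, along the lines of
 * mm/vmscan.c:
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:
 *              ... could not unmap, keep the page activated ...
 *      case SWAP_AGAIN:
 *              ... a pte was busy, retry the page later ...
 *      case SWAP_MLOCK:
 *              ... mlocked, cull the page to the unevictable list ...
 *      case SWAP_SUCCESS:
 *              ... fully unmapped, the page can now be paged out ...
 *      }
 */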

#endif  /* _LINUX_RMAP_H */