#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        spinlock_t lock;        /* Serialize access to vma list */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
        /*
         * The external_refcount is taken by either KSM or page migration
         * to take a reference to an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release
         */
        atomic_t external_refcount;
#endif
        /*
         * NOTE: the LSB of the head.next is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * head must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct list_head head;  /* Chain of private "related" vmas */
};
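
/*
 * Illustrative sketch, not part of the original header: the typical
 * reverse-map walk over an anon_vma, modeled on the try_to_unmap()
 * and page_referenced() callers in mm/rmap.c:
 *
 *      struct anon_vma *anon_vma = page_anon_vma(page);
 *      struct anon_vma_chain *avc;
 *
 *      if (anon_vma) {
 *              spin_lock(&anon_vma->lock);
 *              list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *                      struct vm_area_struct *vma = avc->vma;
 *                      unsigned long address = page_address_in_vma(page, vma);
 *                      ... inspect or unmap the pte at address ...
 *              }
 *              spin_unlock(&anon_vma->lock);
 *      }
 */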

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct list_head same_anon_vma; /* locked by anon_vma->lock */
};
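
/*
 * Illustrative sketch, not part of the original header: after fork(),
 * the chains form a many-to-many mapping between vmas and anon_vmas,
 * roughly:
 *
 *      parent vma -- avc --> parent anon_vma
 *      child vma  -- avc --> parent anon_vma  (covers pages shared COW)
 *      child vma  -- avc --> child anon_vma   (covers the child's new pages)
 *
 * Each anon_vma_chain sits on its vma's "same_vma" list and on its
 * anon_vma's "same_anon_vma" list, so either side can be enumerated
 * from the other.
 */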

#ifdef CONFIG_MMU
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
        atomic_set(&anon_vma->external_refcount, 0);
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
        return atomic_read(&anon_vma->external_refcount);
}
#else
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
        return 0;
}
#endif /* CONFIG_KSM || CONFIG_MIGRATION */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}
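
/*
 * Added commentary, not in the original header: for an anonymous page,
 * page->mapping stores the anon_vma pointer with the PAGE_MAPPING_ANON
 * bit set in the low bits; page_rmapping() (linux/mm.h) masks off
 * PAGE_MAPPING_FLAGS to recover the plain pointer, roughly:
 *
 *      (struct anon_vma *)((unsigned long)page->mapping &
 *                          ~PAGE_MAPPING_FLAGS)
 */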

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                spin_unlock(&anon_vma->lock);
}
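
/*
 * Illustrative usage, a sketch rather than a quote from mm code:
 * callers such as vma_adjust() bracket updates to the lists hanging
 * off vma->anon_vma with these helpers:
 *
 *      anon_vma_lock(vma);
 *      ... modify the anon_vma's vma lists ...
 *      anon_vma_unlock(vma);
 *
 * The NULL check makes both helpers safe on vmas that have never
 * faulted in an anonymous page.
 */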

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);
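
/*
 * Added commentary, not in the original header: in broad strokes,
 * anon_vma_prepare() runs on the first anonymous fault in a vma,
 * anon_vma_clone() and anon_vma_fork() wire up the chains when a vma
 * is split/merged or duplicated at fork(), and unlink_anon_vmas()
 * tears the chains down when the vma goes away.
 */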

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}
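
/*
 * Added commentary, not in the original header: page_dup_rmap() is the
 * cheap variant used when an existing pte is duplicated, e.g. while
 * copying page tables at fork(); the page already has a complete rmap,
 * so only its mapcount needs to be raised.
 */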

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
                        unsigned long address, unsigned int *mapcount,
                        unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,
        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                     unsigned long address, enum ttu_flags flags);
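
/*
 * Illustrative sketch, not part of the original header: the low byte
 * of ttu_flags selects the action and the high bits are modifiers, so
 * callers combine them; page migration, for instance, unmaps with
 *
 *      try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *                         TTU_IGNORE_ACCESS);
 *
 * and TTU_ACTION(flags) recovers just the action for dispatch.
 */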

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
                          unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
              struct vm_area_struct *, unsigned long, void *), void *arg);

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *cnt,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
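
/*
 * Illustrative sketch, not part of the original header: a
 * reclaim-style caller, modeled loosely on shrink_page_list() in
 * mm/vmscan.c, dispatching on the result:
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_SUCCESS:      (all ptes removed; reclaim the page)
 *              ...
 *      case SWAP_AGAIN:        (transient failure; retry later)
 *      case SWAP_FAIL:         (permanent failure; keep the page)
 *              ...
 *      case SWAP_MLOCK:        (page is mlocked; move to unevictable LRU)
 *              ...
 *      }
 */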

#endif  /* _LINUX_RMAP_H */