/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);
extern void prep_compound_gigantic_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
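
/*
 * Illustrative sketch (not part of the original header): the typical user
 * of set_page_refcounted() is the page allocator, which hands out pages
 * whose _count is 0 and gives the new owner the first reference:
 *
 *	// 'page' just taken from the free lists, so _count == 0
 *	set_page_refcounted(page);	// _count becomes 1
 *	...
 *	put_page(page);			// later: drop that reference
 */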

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern unsigned long highest_memmap_pfn;
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
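
/*
 * Usage sketch (assumption, for illustration only): the buddy allocator
 * records a free block's order in page->private (via set_page_private())
 * and sets PageBuddy on the block's first page, so code scanning a pfn
 * range can skip whole free blocks:
 *
 *	if (PageBuddy(page))
 *		pfn += 1UL << page_order(page);	// jump over the free block
 */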

#ifdef CONFIG_HAVE_MLOCK
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy() to
 * migrate the unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine if it's being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
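
/*
 * Note and sketch (assumptions, not from the original file): the vm_flags
 * test fires only when VM_LOCKED is set and none of the VM_SPECIAL bits
 * (VM_IO, VM_PFNMAP, ...) are, since such special mappings are not managed
 * on the LRU. A hypothetical fault-path caller would decide where the new
 * page goes:
 *
 *	if (!is_mlocked_vma(vma, page))
 *		lru_cache_add_lru(page, LRU_ACTIVE_ANON);	// normal LRU
 *	// else: page is already counted in NR_MLOCK and stays off the
 *	// regular LRU lists (it will end up on the unevictable list)
 */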

/*
 * must be called with vma's mmap_sem held for read, and page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}
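
/*
 * Usage sketch (assumption): during migration the Mlocked state and the
 * NR_MLOCK accounting have to follow the data to the replacement page,
 * roughly:
 *
 *	copy_highpage(newpage, page);
 *	mlock_migrate_page(newpage, page);	// move PG_mlocked + per-zone stats
 *
 * The local_irq_save() section is needed because __dec_zone_page_state()
 * and __inc_zone_page_state() are the non-atomic variants and must not be
 * preempted by an interrupt touching the same per-cpu counters.
 */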

/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }
#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;

		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
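
/*
 * Illustrative iteration pattern (sketch, not from the original file):
 * visiting every subpage of a gigantic page whose size exceeds
 * MAX_ORDER_NR_PAGES, where the mem_map is only guaranteed to be
 * contiguous within a MAX_ORDER-aligned block:
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
 *		// operate on subpage p (offset i within base)
 *	}
 */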

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
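
/*
 * Example of intent (sketch): a boot-time routine such as
 * free_area_init_core() in mm/page_alloc.c is declared
 *
 *	static void __paginginit free_area_init_core(...);
 *
 * so it is discarded after init on FLATMEM/DISCONTIGMEM, but kept as
 * __meminit under SPARSEMEM, where memory hotplug may call back into the
 * paging-init path after boot.
 */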

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
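
/*
 * Usage sketch (hypothetical call, modelled on the mm/ init-time callers):
 * with CONFIG_DEBUG_MEMORY_INIT the macro prints messages whose level is
 * below mminit_loglevel; otherwise the inline stub above turns the call
 * into a no-op:
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone_id, start_pfn, end_pfn);
 */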

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
#define GUP_FLAGS_IGNORE_SIGKILL         0x8

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);
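
/*
 * Hypothetical usage sketch (not part of this header): internal callers
 * combine the GUP_FLAGS_* bits into the 'flags' argument, for example a
 * forced write that should succeed regardless of the vma's protection:
 *
 *	int flags = GUP_FLAGS_WRITE | GUP_FLAGS_FORCE |
 *		    GUP_FLAGS_IGNORE_VMA_PERMISSIONS;
 *	ret = __get_user_pages(tsk, mm, start, len, flags, pages, vmas);
 */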

#endif