/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>
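
/*
 * in mm/memory.c: free the page tables covering a range of
 * just-unmapped vmas, constrained by the floor and ceiling addresses.
 */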
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
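
/*
 * in mm/page_alloc.c: initialise the head and tail pages of a freshly
 * allocated compound (higher-order) page.
 */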
extern void prep_compound_page(struct page *page, unsigned long order);
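
/*
 * Set the page's reference count with a plain store; only safe while
 * the caller is the sole owner of the page (e.g. it is brand new or
 * otherwise invisible to other users).
 */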
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
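
/*
 * Usage sketch: the buddy allocator keeps free pages with _count == 0
 * and hands a page out with exactly one reference, e.g. prep_new_page()
 * in mm/page_alloc.c does
 *
 *	set_page_refcounted(page);
 *
 * so every page returned by alloc_pages() starts with _count == 1.
 *
 * Conversely, __put_page() below drops a reference without the
 * free-on-zero handling of put_page(); it is only for callers that
 * know the count cannot reach zero under them.
 */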
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for returning a page's order in the buddy system.
 * zone->lock is already held when this is used, so we don't need
 * atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
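
/*
 * While a page sits in a buddy free list (PageBuddy set), its order is
 * stashed in page->private, which is what page_order() reads; that
 * field is only stable under zone->lock.
 */

/*
 * in mm/mlock.c: mlock or munlock every page in a range of a vma.
 */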
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy()
 * to carry the Unevictable flag over to the new page.
 * Note that the old page has already been isolated from the LRU lists
 * at this point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in the fault path via page_evictable() for a new page,
 * to determine whether it is being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;
	SetPageMlocked(page);
	return 1;
}
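
/*
 * About the test above: the page is marked mlocked only when the vma
 * has VM_LOCKED set and none of the VM_SPECIAL bits (VM_IO, VM_PFNMAP,
 * ...), since special mappings are never mlock-managed.
 */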

/*
 * Must be called with the vma's mmap_sem held for read, and the page
 * locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}
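
/*
 * Example caller (sketch): truncate_complete_page() calls
 * clear_page_mlock() just before removing a page from the pagecache,
 * so a truncated page cannot linger on the unevictable list.
 */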

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page))
		SetPageMlocked(newpage);
}

#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* CONFIG_UNEVICTABLE_LRU */

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
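
/*
 * Usage sketch: annotate a definition such as
 *
 *	static void __paginginit free_area_init_core(...)
 *
 * so the function is discarded after boot on FLATMEM/DISCONTIGMEM but
 * kept around on SPARSEMEM kernels built with memory hotplug.
 */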

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT
extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
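
/*
 * Usage sketch (hypothetical message): a check prints only when its
 * level is below mminit_loglevel, e.g.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"initialising zone %s\n", zone->name);
 *
 * emits "mminit::memmap_init initialising zone ..." at KERN_DEBUG;
 * only MMINIT_WARNING itself goes to KERN_WARNING.
 */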

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE			0x1
#define GUP_FLAGS_FORCE			0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS	0x4
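
/*
 * in mm/memory.c: the flag-taking backend behind get_user_pages();
 * also used by the mlock code, which needs to fault pages in
 * regardless of the vma's protections
 * (GUP_FLAGS_IGNORE_VMA_PERMISSIONS).
 */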
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int flags,
		struct page **pages, struct vm_area_struct **vmas);
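
/*
 * Usage sketch: get_user_pages() is roughly a thin wrapper,
 *
 *	int flags = 0;
 *	if (write)
 *		flags |= GUP_FLAGS_WRITE;
 *	if (force)
 *		flags |= GUP_FLAGS_FORCE;
 *	return __get_user_pages(tsk, mm, start, len, flags, pages, vmas);
 *
 * while internal callers pass extra GUP_FLAGS_* bits directly.
 */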

#endif	/* __MM_INTERNAL_H */