huge_mm.h

#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
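
/*
 * Transparent hugepage (THP) support: declarations for allocating,
 * copying, splitting and locking huge PMD mappings, plus the tunable
 * flags exposed through sysfs.
 */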
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                      struct vm_area_struct *vma,
                                      unsigned long address, pmd_t *pmd,
                                      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                            unsigned long addr, unsigned long end,
                            unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
                         struct vm_area_struct *new_vma,
                         unsigned long old_addr,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                           unsigned long addr, pgprot_t newprot,
                           int prot_numa);

enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
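
/*
 * Each flag above is a bit index into transparent_hugepage_flags. The
 * first two are driven by /sys/kernel/mm/transparent_hugepage/enabled
 * ("always" vs "madvise"), the defrag flags by the corresponding
 * defrag sysfs file. A sketch of how a bit is tested:
 *
 *      if (transparent_hugepage_flags & (1<<TRANSPARENT_HUGEPAGE_FLAG))
 *              ... THP is enabled system-wide ...
 */
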
enum page_check_address_pmd_flag {
        PAGE_CHECK_ADDRESS_PMD_FLAG,
        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
                                     struct mm_struct *mm,
                                     unsigned long address,
                                     enum page_check_address_pmd_flag flag);
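
/*
 * As used by the rmap code, page_check_address_pmd() returns the pmd
 * mapping @page at @address (or NULL if there is none); the flag lets
 * callers additionally assert whether or not that pmd is in the middle
 * of a split.
 */
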
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma) \
        ((transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) || \
          (transparent_hugepage_flags & \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
           ((__vma)->vm_flags & VM_HUGEPAGE))) && \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
         !is_vma_temporary_stack(__vma))
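
/*
 * A sketch of the intended use, modelled on the anonymous fault path
 * (the locals here are illustrative, not part of this header):
 *
 *      if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
 *              ret = do_huge_pmd_anonymous_page(mm, vma, address,
 *                                               pmd, flags);
 */
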
#define transparent_hugepage_defrag(__vma) \
        ((transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
         (transparent_hugepage_flags & \
          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
          (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow() \
        (transparent_hugepage_flags & \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */
extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                          pmd_t *dst_pmd, pmd_t *src_pmd,
                          struct vm_area_struct *vma,
                          unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
                            struct vm_area_struct *vma, unsigned long address,
                            pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd) \
        do { \
                pmd_t *____pmd = (__pmd); \
                if (unlikely(pmd_trans_huge(*____pmd))) \
                        __split_huge_page_pmd(__mm, ____pmd); \
        } while (0)
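/*
 * The wrapper above keeps the common case cheap: only when the pmd is
 * actually mapped huge does it drop into __split_huge_page_pmd(); for
 * a regular pmd it reduces to a single test.
 */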
#define wait_split_huge_page(__anon_vma, __pmd) \
        do { \
                pmd_t *____pmd = (__pmd); \
                anon_vma_lock_write(__anon_vma); \
                anon_vma_unlock(__anon_vma); \
                BUG_ON(pmd_trans_splitting(*____pmd) || \
                       pmd_trans_huge(*____pmd)); \
        } while (0)
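/*
 * Taking and immediately releasing the anon_vma lock works because the
 * splitter holds that lock for the duration of the split: once the lock
 * can be acquired, any split that marked this pmd as splitting has
 * completed, which the BUG_ON then verifies.
 */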
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
                                 struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
                                      struct vm_area_struct *vma)
{
        VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
        if (pmd_trans_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return 0;
}
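
/*
 * Caller pattern, as a sketch (on success the mm's page_table_lock is
 * returned held and must be dropped by the caller):
 *
 *      if (pmd_trans_huge_lock(pmd, vma) == 1) {
 *              ... the pmd is huge and stable, operate on it ...
 *              spin_unlock(&vma->vm_mm->page_table_lock);
 *      }
 */
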
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
        if (!vma->anon_vma || vma->vm_ops)
                return;
        __vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
        if (PageTail(page)) {
                struct page *head;
                head = page->first_page;
                smp_rmb();
                /*
                 * head may be a dangling pointer.
                 * __split_huge_page_refcount clears PageTail before
                 * overwriting first_page, so if PageTail is still
                 * there it means the head pointer isn't dangling.
                 */
                if (PageTail(page))
                        return head;
        }
        return page;
}
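
/*
 * Entry point for a NUMA hinting fault on a huge pmd, used by the
 * automatic NUMA balancing code.
 */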
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
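
/*
 * With THP compiled out, provide stubs so callers need no #ifdefs:
 * predicates report "not huge", split/wait become no-ops, and paths
 * that should be unreachable trap with BUG().
 */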
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
#define hpage_nr_pages(x) 1
#define transparent_hugepage_enabled(__vma) 0
#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
        return 0;
}
#define split_huge_page_pmd(__mm, __pmd) \
        do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd) \
        do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
                                      struct vm_area_struct *vma)
{
        return 0;
}
static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */