hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
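
/* Editorial note: both helpers below set align_mask to
 * PAGE_MASK & ~HPAGE_MASK, i.e. exactly the address bits between
 * PAGE_SHIFT and HPAGE_SHIFT. vm_unmapped_area() keeps those bits
 * clear in the address it returns, which forces the result to be
 * HPAGE_SIZE-aligned.
 */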
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

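	/* The first search was capped at VA_EXCLUDE_START, the bottom
	 * of the sparc64 virtual address hole. If it failed and the
	 * task is 64-bit (its task_size extends beyond the hole),
	 * retry in the range above VA_EXCLUDE_END.
	 */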
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
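
/* Editorial note: sparc64 defines HAVE_ARCH_HUGETLB_UNMAPPED_AREA, so
 * generic hugetlbfs calls this entry point instead of its default.
 * It validates length and alignment, honors MAP_FIXED and an explicit
 * address hint, then searches bottom-up or top-down to match the
 * mm's regular mmap layout.
 */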
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
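
/* Editorial note: sparc64 uses an 8KB base page, and with the default
 * CONFIG_HUGETLB_PAGE_SIZE_4MB a huge page is 4MB, so
 * HUGETLB_PAGE_ORDER is 9 and one huge page is backed by 512
 * consecutive entries in an ordinary PTE table. The helpers below
 * therefore walk all the way down to the PTE level instead of
 * stopping at a huge PMD or PUD as most other architectures do.
 */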
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}
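
/* Look up the first sub-PTE of the huge page mapping addr, without
 * allocating anything; returns NULL if any level of the walk is
 * still empty.
 */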
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
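
/* sparc64 does not share hugetlb page tables between processes, so
 * there is never a huge PMD to unshare here.
 */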
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
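
/* Install a huge page mapping by writing one PTE per base page in the
 * range, bumping the target physical address by PAGE_SIZE each step
 * so the sub-PTEs map consecutive 8KB pages. The huge_pte_count
 * bookkeeping is (editorial assumption) what the sparc64 TSB code
 * consults to decide when to allocate and grow the hugepage TSB.
 */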
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}
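
/* The inverse of set_huge_pte_at(): clear every sub-PTE in the range
 * and hand back the first one, which is all the generic hugetlb code
 * needs to inspect.
 */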
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}
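
/* Because sparc64 huge pages live at the PTE level, the generic
 * PMD/PUD-based huge page paths never apply here; the stubs below
 * exist only to satisfy the generic hugetlb interface.
 */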
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pmd_huge_support(void)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}