hugetlbpage.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
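
/*
 * Walk (and if necessary allocate) the page table down to the PMD level
 * for a huge page mapping.  Only one huge page size (PMD_SIZE) is
 * supported, so the PMD entry itself is returned as the "pte".
 */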
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        /* We do not yet support multiple huge page sizes. */
        BUG_ON(sz != PMD_SIZE);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}
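
/*
 * Look up the PMD-level entry for an existing huge page mapping without
 * allocating; returns NULL if the upper page table levels are not
 * populated for this address.
 */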
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}
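
/*
 * Two sets of the follow_huge_*() / p*d_huge() helpers follow: a
 * HUGETLB_TEST variant that resolves huge pages via follow_huge_addr(),
 * and the normal variant that resolves them at the PMD/PUD level.
 */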
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        unsigned long vpfn = address >> PAGE_SHIFT;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* index into the huge page by the page offset within it */
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#else
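
/*
 * Normal (non-test) implementations: huge mappings are recognized by the
 * _PAGE_HUGE_PAGE bit in the PMD/PUD entry, and follow_huge_pmd()/pud()
 * return the page within the huge page that backs the given address.
 */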
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);

        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);

        return page;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
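
/*
 * Bottom-up search for a free, huge-page-aligned region: start at the
 * cached free-area hint (or TASK_UNMAPPED_BASE) and scan VMAs upward
 * until a hole of at least @len bytes is found.
 */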
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}
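
/*
 * Top-down search: start just below the cached free-area hint and walk
 * downward through the VMAs, tracking the largest hole seen.  If no
 * space is found, retry from the mmap base, and finally fall back to
 * the bottom-up allocator.
 */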
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma_prev(mm, addr, &prev_vma);
                if (!vma)
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        mm->free_area_cache = addr;
                        return addr;
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
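
/*
 * Arch hook for hugetlbfs mmap(): validate the length and any fixed or
 * hinted address, then dispatch to the bottom-up or top-down search
 * depending on the mm's normal get_unmapped_area strategy.
 */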
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
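
/*
 * Parse the "hugepagesz=" boot parameter and register the requested
 * huge page size; only PMD_SIZE and PUD_SIZE pages are accepted.
 */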
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/