/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
	[HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};

#endif
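
/*
 * For example, assuming a 64 KB base page size, the 1 MB
 * ADDITIONAL_HUGE_SIZE above yields
 * __builtin_ctzl(1 MB / 64 KB) == __builtin_ctzl(16) == 4, so
 * huge_shift[HUGE_SHIFT_PAGE] == 4 and each such huge page spans
 * 1 << 4 == 16 consecutive L2 PTEs.
 */
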
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;

	addr &= -sz;   /* Mask off any low bits in the address. */

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (sz >= PGDIR_SIZE) {
		BUG_ON(sz != PGDIR_SIZE &&
		       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
		return (pte_t *)pud;
	} else {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);
		if (sz >= PMD_SIZE) {
			BUG_ON(sz != PMD_SIZE &&
			       sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
			return (pte_t *)pmd;
		} else {
			if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
				panic("Unexpected page size %#lx\n", sz);
			return pte_alloc_map(mm, NULL, pmd, addr);
		}
	}
#else
	BUG_ON(sz != PMD_SIZE);
	return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}
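
/*
 * A "super" huge page (CONFIG_HUGETLB_SUPER_PAGES) occupies a single
 * PTE at the index rounded down to the super-page boundary.  Given the
 * nominal index for an address, fall back to that super slot when the
 * nominal slot is empty: e.g., with huge_shift[level] == 2 the mask is
 * ~3UL, so indices 0..3 all resolve to the super PTE in slot 0.
 */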
static pte_t *get_pte(pte_t *base, int index, int level)
{
	pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	if (!pte_present(*ptep) && huge_shift[level] != 0) {
		unsigned long mask = -1UL << huge_shift[level];
		pte_t *super_ptep = base + (index & mask);
		pte_t pte = *super_ptep;
		if (pte_present(pte) && pte_super(pte))
			ptep = super_ptep;
	}
#endif
	return ptep;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/* Check for an L2 huge PTE. */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif

	return NULL;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

int pmd_huge_support(void)
{
	return 1;
}
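
/*
 * Return the base page within the huge mapping that contains 'address':
 * (address & ~PMD_MASK) is the byte offset into the PMD-sized region,
 * and shifting right by PAGE_SHIFT turns it into an index off the huge
 * page's first struct page.  follow_huge_pud() does the same one level
 * up, using PUD_MASK.
 */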
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
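
/*
 * When HAVE_ARCH_HUGETLB_UNMAPPED_AREA is defined, the architecture
 * supplies its own address-space search for huge mappings: a bottom-up
 * walk for the legacy mmap layout and a top-down walk below mmap_base
 * otherwise, with hugetlb_get_unmapped_area() choosing between them.
 */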
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
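
/*
 * In this helper and the top-down variant below, align_mask is
 * PAGE_MASK & ~huge_page_mask(h): exactly the offset bits between the
 * base page size and the huge page size, which tells vm_unmapped_area()
 * to return a huge-page-aligned address (e.g., a multiple of 1 MB for
 * a hypothetical 1 MB huge page size).
 */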
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
static __init int __setup_hugepagesz(unsigned long ps)
{
	int log_ps = __builtin_ctzl(ps);
	int level, base_shift;

	if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
		pr_warn("Not enabling %ld byte huge pages;"
			" must be a power of four.\n", ps);
		return -EINVAL;
	}

	if (ps > 64*1024*1024*1024UL) {
		pr_warn("Not enabling %ld MB huge pages;"
			" largest legal value is 64 GB.\n", ps >> 20);
		return -EINVAL;
	} else if (ps >= PUD_SIZE) {
		static long hv_jpage_size;
		if (hv_jpage_size == 0)
			hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
		if (hv_jpage_size != PUD_SIZE) {
			pr_warn("Not enabling >= %ld MB huge pages:"
				" hypervisor reports size %ld\n",
				PUD_SIZE >> 20, hv_jpage_size);
			return -EINVAL;
		}
		level = 0;
		base_shift = PUD_SHIFT;
	} else if (ps >= PMD_SIZE) {
		level = 1;
		base_shift = PMD_SHIFT;
	} else if (ps > PAGE_SIZE) {
		level = 2;
		base_shift = PAGE_SHIFT;
	} else {
		pr_err("hugepagesz: huge page size %ld too small\n", ps);
		return -EINVAL;
	}

	if (log_ps != base_shift) {
		int shift_val = log_ps - base_shift;
		if (huge_shift[level] != 0) {
			int old_shift = base_shift + huge_shift[level];
			pr_warn("Not enabling %ld MB huge pages;"
				" already have size %ld MB.\n",
				ps >> 20, (1UL << old_shift) >> 20);
			return -EINVAL;
		}
		if (hv_set_pte_super_shift(level, shift_val) != 0) {
			pr_warn("Not enabling %ld MB huge pages;"
				" no hypervisor support.\n", ps >> 20);
			return -EINVAL;
		}
		printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
		huge_shift[level] = shift_val;
	}

	hugetlb_add_hstate(log_ps - PAGE_SHIFT);
	return 0;
}
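
/*
 * Huge page sizes are selected on the kernel command line; e.g., booting
 * with "hugepagesz=256K hugepages=64" would, assuming the hypervisor
 * accepts that super-page shift, register a 256 KB hstate and let the
 * generic hugetlb code preallocate 64 such pages.  Note that the first
 * "hugepagesz=" argument zeroes huge_shift[], discarding the compiled-in
 * ADDITIONAL_HUGE_SIZE default.
 */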
static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
	if (!saw_hugepagesz) {
		saw_hugepagesz = true;
		memset(huge_shift, 0, sizeof(huge_shift));
	}
	return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
	if (!saw_hugepagesz) {
		BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
			     ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
		BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
			     ADDITIONAL_HUGE_SIZE);
		BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
		hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
	}
	return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */