hugetlbpage.c

/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
        u64 gpage_list[MAX_NUMBER_GPAGES];
        unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	((hpd).pd == 0)
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
 */
int pmd_huge(pmd_t pmd)
{
        /*
         * leaf pte for huge page, bottom two bits != 00
         */
        return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
        /*
         * leaf pte for huge page, bottom two bits != 00
         */
        return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
        /*
         * leaf pte for huge page, bottom two bits != 00
         */
        return ((pgd_val(pgd) & 0x3) != 0x0);
}
#else
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

int pgd_huge(pgd_t pgd)
{
        return 0;
}
#endif
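
/*
 * See the four-case entry encoding described above
 * find_linux_pte_or_hugepte() at the bottom of this file: a hugepd pointer
 * (case 4) keeps its bottom two bits clear, so the tests above match only
 * leaf huge-page ptes (case 3) and never mistake a hugepd for one.
 */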
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        /* Only called for hugetlbfs pages, hence can ignore THP */
        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
        int i;
        int num_hugepd = 1 << (pshift - pdshift);
        cachep = hugepte_cache;
#else
        cachep = PGT_CACHE(pdshift - pshift);
#endif

        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else
                        /* We use the old format for PPC_FSL_BOOK3E */
                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
#else
        if (!hugepd_none(*hpdp))
                kmem_cache_free(cachep, new);
        else {
#ifdef CONFIG_PPC_BOOK3S_64
                hpdp->pd = (unsigned long)new |
                           (shift_to_mmu_psize(pshift) << 2);
#else
                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
        }
#endif
        spin_unlock(&mm->page_table_lock);
        return 0;
}
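
/*
 * Worked example for the FSL branch of __hugepte_alloc(): with a huge page
 * of pshift = 22 (4M) installed at a level of pdshift = 20, num_hugepd is
 * 1 << (22 - 20) = 4, so four consecutive directory entries are filled in,
 * all pointing at the single hugepte allocated from hugepte_cache.  (The
 * exact shifts depend on the configured pagetable geometry; the arithmetic
 * is the point here.)
 */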
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
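
/*
 * On FSL a hugepd replaces the directory level that would normally map the
 * range (pshift >= PGDIR_SHIFT goes in the pgd); elsewhere the threshold
 * sits one level lower, so pshift >= PUD_SHIFT already places the hugepd in
 * the pgd.  huge_pte_alloc() below compares pshift against these thresholds.
 */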
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) pg;
        else if (pshift > PUD_SHIFT)
                /*
                 * We need to use hugepd table
                 */
                hpdp = (hugepd_t *)pg;
        else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT)
                        hpdp = (hugepd_t *)pu;
                else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else
                                hpdp = (hugepd_t *)pm;
                }
        }
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}
#else
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}
#endif
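
/*
 * Example walk of the BOOK3S 64 variant, assuming a 64K base page size so
 * that PMD_SHIFT == 24: for sz = 16M, pshift = __ffs(16M) = 24 == PMD_SHIFT,
 * so the pmd entry itself is returned as the huge pte (the "16MB hugepage"
 * case) and no hugepd table is needed at all.
 */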
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
        int i;

        if (addr == 0)
                return;

        gpage_freearray[idx].nr_gpages = number_of_pages;

        for (i = 0; i < number_of_pages; i++) {
                gpage_freearray[idx].gpage_list[i] = addr;
                addr += page_size;
        }
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
        int nr_gpages = gpage_freearray[idx].nr_gpages;

        if (nr_gpages == 0)
                return 0;

#ifdef CONFIG_HIGHMEM
        /*
         * If gpages can be in highmem we can't use the trick of storing the
         * data structure in the page; allocate space for this
         */
        m = alloc_bootmem(sizeof(struct huge_bootmem_page));
        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

        list_add(&m->list, &huge_boot_pages);
        gpage_freearray[idx].nr_gpages = nr_gpages;
        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
        m->hstate = hstate;

        return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */
unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
                                       const char *unused)
{
        static phys_addr_t size;
        unsigned long npages;

        /*
         * The hugepagesz and hugepages cmdline options are interleaved.  We
         * use the size variable to keep track of whether or not this was done
         * properly and skip over instances where it is incorrect.  Other
         * command-line parsing code will issue warnings, so we don't need to.
         */
        if ((strcmp(param, "default_hugepagesz") == 0) ||
            (strcmp(param, "hugepagesz") == 0)) {
                size = memparse(val, NULL);
        } else if (strcmp(param, "hugepages") == 0) {
                if (size != 0) {
                        if (sscanf(val, "%lu", &npages) <= 0)
                                npages = 0;
                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
                        size = 0;
                }
        }
        return 0;
}
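
/*
 * For instance, a (hypothetical) command line of
 * "hugepagesz=1G hugepages=2 hugepagesz=256M hugepages=4" leaves
 * gpage_npages[shift_to_mmu_psize(30)] = 2 and
 * gpage_npages[shift_to_mmu_psize(28)] = 4, while a bare "hugepages=8" with
 * no preceding hugepagesz is skipped because size is still zero.
 */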
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
        static __initdata char cmdline[COMMAND_LINE_SIZE];
        phys_addr_t size, base;
        int i;

        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
                   &do_gpage_early_setup);

        /*
         * Walk gpage list in reverse, allocating larger page sizes first.
         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
         * When we reach the point in the list where pages are no longer
         * considered gpages, we're done.
         */
        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
                        continue;
                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
                        break;

                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
                base = memblock_alloc_base(size * gpage_npages[i], size,
                                           MEMBLOCK_ALLOC_ANYWHERE);
                add_gpage(base, size, gpage_npages[i]);
        }
}
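
/*
 * Note the sizing of the memblock_alloc_base() call above: all the gpages of
 * one size come from a single physically contiguous reservation of
 * size * gpage_npages[i] bytes, aligned to the page size; e.g. four 1G
 * gpages are carved out of one 1G-aligned 4G block, which add_gpage() then
 * splits into per-page addresses.
 */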
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &__get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
}
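
/*
 * To summarize the free path above: if the mm is single-threaded, or this
 * CPU is the only one with the mm active, no other walker can be traversing
 * the hugepte table and it is freed immediately.  Otherwise it is queued in
 * a per-CPU page-sized batch that is handed to call_rcu_sched() once full,
 * so lockless walkers running with interrupts disabled (see
 * find_linux_pte_or_hugepte() below) never see it freed under them.
 */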
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        /* Note: On fsl the hpdp may be the first of several */
        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
        unsigned int shift = hugepd_shift(*hpdp);
#endif

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                hpdp->pd = 0;

        tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        hugepd_free(tlb, hugepte);
#else
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */
        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(pgd)) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned shift;
        unsigned long mask;
        /*
         * Transparent hugepages are handled by generic code. We can skip them
         * here.
         */
        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

        /* Verify it is a huge page else bail. */
        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
                return ERR_PTR(-EINVAL);

        mask = (1UL << shift) - 1;
        page = pte_page(*ptep);
        if (page)
                page += (address & mask) / PAGE_SIZE;

        return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}
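
/*
 * For example, with sz = 16M (0x1000000) and addr = 0x01200000, __boundary
 * rounds up to the next 16M boundary: (0x01200000 + 0x1000000) & ~0xffffff
 * = 0x02000000.  The "- 1" in the comparison keeps the check correct even
 * when end is 0, i.e. a range that wraps at the very top of the address
 * space.
 */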
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
               unsigned long addr, unsigned long end,
               int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(*hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
#else
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
#endif
}
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}
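
/*
 * A power of 4 is a power of 2 with an even exponent: 16M = 2^24 passes
 * (24 is even) while 8M = 2^23 does not.  This matches the hardware check
 * in add_huge_page_size() below, where the FSL embedded MMU only accepts
 * power-of-4 page sizes.
 */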
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if ((size < PAGE_SIZE) || !is_power_of_4(size))
                return -EINVAL;
#else
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
        /* Disable support for 64K huge pages when 64K SPU local store
         * support is enabled as the current implementation conflicts.
         */
        if (shift == PAGE_SHIFT_64K)
                return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}
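
/*
 * For example, booting with "hugepagesz=16M" yields size = 16M, shift = 24
 * and mmu_psize = MMU_PAGE_16M, registering an hstate of order
 * 24 - PAGE_SHIFT (order 12 with a 4K base page).
 */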
static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0)
                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                /* Don't treat normal page sizes as huge... */
                if (shift != PAGE_SHIFT)
                        if (add_huge_page_size(1ULL << shift) < 0)
                                continue;
        }

        /*
         * Create a kmem cache for hugeptes.  The bottom bits in the pte have
         * size information encoded in them, so align them to allow this
         */
        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
                                          HUGEPD_SHIFT_MASK + 1, 0, NULL);
        if (hugepte_cache == NULL)
                panic("%s: Unable to create kmem cache for hugeptes\n",
                      __func__);

        /* Default hpage size = 4M */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else
                panic("%s: Unable to set default huge page size\n", __func__);

        return 0;
}
#else
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
                /*
                 * If pdshift and shift are the same, we don't use the
                 * pgt cache for the hugepd.
                 */
                if (pdshift != shift) {
                        pgtable_cache_add(pdshift - shift, NULL);
                        if (!PGT_CACHE(pdshift - shift))
                                panic("hugetlbpage_init(): could not create "
                                      "pgtable cache for %d bit pagesize\n", shift);
                }
        }

        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}
#endif
module_init(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        pgdp = pgdir + pgd_index(ea);
        pgd = ACCESS_ONCE(*pgdp);
        /*
         * Always operate on the local stack value.  This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or a page unmap.  The returned pte_t * is still not
         * stable, so the caller must recheck it for the above conditions.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *) pgdp;
                goto out;
        } else if (is_hugepd(&pgd))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud = ACCESS_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *) pudp;
                        goto out;
                } else if (is_hugepd(&pud))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd = ACCESS_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none,
                         * because it marks the pmd none and does an hpte
                         * invalidate.
                         *
                         * A hugepage split is captured by
                         * pmd_trans_splitting, because we mark the pmd
                         * trans splitting and do an hpte invalidate.
                         */
                        if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                                return NULL;

                        if (pmd_huge(pmd) || pmd_large(pmd)) {
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        } else if (is_hugepd(&pmd))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }
        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (shift)
                *shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page, *tail;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = ACCESS_ONCE(*ptep);
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /*
         * check for splitting here
         */
        if (pmd_trans_splitting(pte_pmd(pte)))
                return 0;
#endif

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        /*
         * Any tail pages need their mapcount reference taken before we
         * return.
         */
        while (refs--) {
                if (PageTail(tail))
                        get_huge_page_tail(tail);
                tail++;
        }

        return 1;
}
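
/*
 * The refcounting dance above is the usual lockless get_user_pages fast
 * path: record the subpages, take a speculative reference on the head page,
 * then re-check that the pte has not changed underneath us.  If it has,
 * every reference just taken is dropped and the caller falls back to the
 * slow path.
 */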