/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual pfn */
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* select the 4 KB subpage within the huge page */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}
#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
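
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a page-table walker might consult pmd_huge()/pud_huge().  The
 * real walk lives in generic code such as follow_page(); the function
 * below and its name are hypothetical.
 */
#if 0
static struct page *example_walk_to_page(struct mm_struct *mm,
					 unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	if (pud_huge(*pud))	/* _PAGE_PSE set: 1 GB mapping, no pmd level */
		return pud_page(*pud);
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_huge(*pmd))	/* _PAGE_PSE set: 2 MB mapping, no pte level */
		return pmd_page(*pmd);
	return NULL;		/* normal 4 KB lookup via the pte level omitted */
}
#endif
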
/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
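
/*
 * Worked example for the alignment math above, assuming 2 MB huge
 * pages: huge_page_mask(h) == ~0x1fffffUL, so
 *
 *	align_mask = PAGE_MASK & 0x1fffff = 0x1ff000
 *
 * vm_unmapped_area() then clears bits 12-20 of the returned address;
 * together with page alignment this makes the result 2 MB-aligned.
 */
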
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		/* error returns (e.g. -ENOMEM) are never page-aligned */
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
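
/*
 * Userspace view (a sketch, not part of the original file): one way a
 * mapping can reach the hook above is, e.g.,
 *
 *	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * The 4 MB length is a multiple of the 2 MB huge page size, so the
 * (len & ~huge_page_mask(h)) check passes, and the address returned
 * is huge-page aligned.
 */
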
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
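
/*
 * Usage: pass "hugepagesz=2M" or, on CPUs with the gbpages feature,
 * "hugepagesz=1G" on the kernel command line, typically together with
 * "hugepages=N" to reserve N pages of that size.  memparse() accepts
 * the K/M/G suffixes.
 */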
#endif