/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
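
/*
 * Walk, allocating any missing intermediate levels, to the PTE that maps
 * the given hugetlb address.  The address is first rescaled with
 * htlbpage_to_page() so that the generic pgd/pud/pmd walkers, which index
 * in units of PAGE_SIZE, land on the entry for this huge page.
 */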
pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
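
/*
 * Lookup-only counterpart of huge_pte_alloc(): return the PTE mapping the
 * given hugetlb address, or NULL if any intermediate level is absent.
 */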
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
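
/*
 * ia64 needs no special PTE format bit for huge pages: a mapping is
 * recognized as huge by its address falling in RGN_HPAGE, so a huge PTE
 * only has to be marked present (_PAGE_P).
 */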
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
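
/*
 * Translate a hugetlb address into its struct page.  Note that the result
 * is offset to the PAGE_SIZE-sized subpage within the huge page, which is
 * what follow_page()-style callers work with.
 */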
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
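
/*
 * Huge pages on ia64 are not implemented at the pmd level (they live in
 * their own region with rescaled page tables instead), so the generic
 * pmd-based huge page hooks are stubs here.
 */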
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called only when is_hugepage_only_range(addr,),
	 * and it follows that is_hugepage_only_range(end,) also.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
		floor = htlbpage_to_page(floor);
	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
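
/*
 * First-fit search for a free range in the hugetlb region: start from the
 * caller's hint (falling back to HPAGE_REGION_BASE when the hint is not a
 * usable hugetlb address) and walk the VMA list in HPAGE_SIZE steps.
 */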
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}
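
/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be a
 * power of two, must be one of the TLB page sizes the CPU reports via
 * PAL, and must lie above PAGE_SIZE and below the MAX_ORDER allocation
 * limit.
 */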
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size-1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init and set up the hugetlb
	 * region with HPAGE_SHIFT_DEFAULT; override that here with the new
	 * page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}

__setup("hugepagesz=", hugetlb_setup_sz);