/* hugetlbpage.c */
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright 2007 IBM Corp.
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
  7. #include <linux/mm.h>
  8. #include <linux/hugetlb.h>
/*
 * Install a huge-page mapping: write a segment-table (pmd) entry for @addr.
 *
 * @mm:     address space being modified
 * @addr:   virtual address of the huge page (unused here; kept for the
 *          generic set_huge_pte_at() interface)
 * @pteptr: actually points at the pmd slot for the huge page
 * @pteval: the huge pte value to install
 *
 * Without hardware large-page support (!MACHINE_HAS_HPAGE) the pte table
 * pre-built by arch_prepare_hugepage() — stashed in page[1].index of the
 * huge page — is linked into the segment entry instead, carrying over the
 * invalid/read-only bits from @pteval.  When the mm uses a shadow
 * (no-exec) page table, the matching shadow segment entry is set as well.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	/* The "pte" pointer really addresses a segment-table entry. */
	pmd_t *pmdp = (pmd_t *) pteptr;
	pte_t shadow_pteval = pteval;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/* pte table prepared by arch_prepare_hugepage() */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		/* preserve protection bits in the segment entry */
		mask = pte_val(pteval) &
			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		if (mm->context.noexec) {
			/* shadow pte table follows the primary one */
			pteptr += PTRS_PER_PTE;
			pte_val(shadow_pteval) =
				(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		}
	}

	pmd_val(*pmdp) = pte_val(pteval);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		pmd_val(*pmdp) = pte_val(shadow_pteval);
	}
}
  32. int arch_prepare_hugepage(struct page *page)
  33. {
  34. unsigned long addr = page_to_phys(page);
  35. pte_t pte;
  36. pte_t *ptep;
  37. int i;
  38. if (MACHINE_HAS_HPAGE)
  39. return 0;
  40. ptep = (pte_t *) pte_alloc_one(&init_mm, address);
  41. if (!ptep)
  42. return -ENOMEM;
  43. pte = mk_pte(page, PAGE_RW);
  44. for (i = 0; i < PTRS_PER_PTE; i++) {
  45. set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
  46. pte_val(pte) += PAGE_SIZE;
  47. }
  48. page[1].index = (unsigned long) ptep;
  49. return 0;
  50. }
  51. void arch_release_hugepage(struct page *page)
  52. {
  53. pte_t *ptep;
  54. if (MACHINE_HAS_HPAGE)
  55. return;
  56. ptep = (pte_t *) page[1].index;
  57. if (!ptep)
  58. return;
  59. pte_free(&init_mm, ptep);
  60. page[1].index = 0;
  61. }
  62. pte_t *huge_pte_alloc(struct mm_struct *mm,
  63. unsigned long addr, unsigned long sz)
  64. {
  65. pgd_t *pgdp;
  66. pud_t *pudp;
  67. pmd_t *pmdp = NULL;
  68. pgdp = pgd_offset(mm, addr);
  69. pudp = pud_alloc(mm, pgdp, addr);
  70. if (pudp)
  71. pmdp = pmd_alloc(mm, pudp, addr);
  72. return (pte_t *) pmdp;
  73. }
  74. pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
  75. {
  76. pgd_t *pgdp;
  77. pud_t *pudp;
  78. pmd_t *pmdp = NULL;
  79. pgdp = pgd_offset(mm, addr);
  80. if (pgd_present(*pgdp)) {
  81. pudp = pud_offset(pgdp, addr);
  82. if (pud_present(*pudp))
  83. pmdp = pmd_offset(pudp, addr);
  84. }
  85. return (pte_t *) pmdp;
  86. }
  87. int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  88. {
  89. return 0;
  90. }
  91. struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
  92. int write)
  93. {
  94. return ERR_PTR(-EINVAL);
  95. }
  96. int pmd_huge(pmd_t pmd)
  97. {
  98. if (!MACHINE_HAS_HPAGE)
  99. return 0;
  100. return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
  101. }
  102. int pud_huge(pud_t pud)
  103. {
  104. return 0;
  105. }
  106. struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
  107. pmd_t *pmdp, int write)
  108. {
  109. struct page *page;
  110. if (!MACHINE_HAS_HPAGE)
  111. return NULL;
  112. page = pmd_page(*pmdp);
  113. if (page)
  114. page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
  115. return page;
  116. }