hugetlb.h

/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range                  free_pgd_range
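
/*
 * Note: huge pages are mapped at the segment table level on s390, so the
 * pte_t pointers handled by the helpers below actually refer to segment
 * table entries; hence the _SEGMENT_ENTRY_* bits and the (pmd_t *) casts
 * further down.
 */
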
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}
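
/*
 * Example (assuming the usual 1 MB segment-sized huge pages on s390, i.e.
 * HPAGE_SHIFT == 20): prepare_hugepage_range(0x100000, 0x400000) returns 0,
 * while any address or length that is not a multiple of HPAGE_SIZE yields
 * -EINVAL.
 */
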
#define hugetlb_prefault_arch_hook(mm)          do { } while (0)

int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
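
/*
 * arch_prepare_hugepage() and arch_release_hugepage() are implemented in
 * arch/s390/mm/hugetlbpage.c; on machines without hardware large page
 * support they presumably set up and tear down the software page table
 * that huge_ptep_get() below follows.
 */
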
static inline pte_t pte_mkhuge(pte_t pte)
{
        /*
         * PROT_NONE needs to be remapped from the pte type to the ste type.
         * The HW invalid bit is also different for pte and ste. The pte
         * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
         * bit, so we don't have to clear it.
         */
        if (pte_val(pte) & _PAGE_INVALID) {
                if (pte_val(pte) & _PAGE_SWT)
                        pte_val(pte) |= _HPAGE_TYPE_NONE;
                pte_val(pte) |= _SEGMENT_ENTRY_INV;
        }
        /*
         * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
         * table entry.
         */
        pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
        /*
         * Also set the change-override bit because we don't need dirty bit
         * tracking for hugetlbfs pages.
         */
        pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
        return pte;
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        pte_val(pte) |= _PAGE_RO;
        return pte;
}
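
/*
 * A cleared huge pte carries only the segment invalid bit, while a
 * PROT_NONE mapping (see pte_mkhuge() above) appears to keep the RO bit
 * set as well, so testing INV && !RO distinguishes "no mapping" from
 * "mapped but inaccessible".
 */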
static inline int huge_pte_none(pte_t pte)
{
        return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
               !(pte_val(pte) & _SEGMENT_ENTRY_RO);
}
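
/*
 * Without hardware large page support (!MACHINE_HAS_HPAGE) the segment
 * entry holds the origin of a software page table rather than a large
 * page frame; in that case the huge pte is rebuilt from the first entry
 * of that table, with the INV/RO bits of the segment entry ORed back in.
 */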
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        pte_t pte = *ptep;
        unsigned long mask;

        if (!MACHINE_HAS_HPAGE) {
                ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
                if (ptep) {
                        mask = pte_val(pte) &
                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                        pte = pte_mkhuge(*ptep);
                        pte_val(pte) |= mask;
                }
        }
        return pte;
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t pte = huge_ptep_get(ptep);

        pmd_clear((pmd_t *) ptep);
        return pte;
}
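
/*
 * Invalidate a segment table entry with COMPARE AND SWAP AND PURGE (csp),
 * which replaces the entry and purges the matching TLB entries on all
 * CPUs; this is the fallback path used when IDTE is not available.
 */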
static inline void __pmd_csp(pmd_t *pmdp)
{
        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
                                               _SEGMENT_ENTRY_INV;
        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

        asm volatile(
                "       csp %1,%3"
                : "=m" (*pmdp)
                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
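
/*
 * Invalidate a segment table entry with INVALIDATE DAT TABLE ENTRY
 * (opcode 0xb98e, emitted via .insn since older assemblers may not know
 * the mnemonic); IDTE needs the segment table origin, which is recomputed
 * here from pmdp and pmd_index().
 */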
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto = (unsigned long) pmdp -
                            pmd_index(address) * sizeof(pmd_t);

        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
                        : "=m" (*pmdp)
                        : "m" (*pmdp), "a" (sto),
                          "a" ((address & HPAGE_MASK))
                );
        }
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
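
/*
 * Use IDTE when the machine has it and fall back to csp otherwise; if the
 * mm maintains a separate no-exec address space (mm->context.noexec), the
 * shadow segment table entry is invalidated as well.
 */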
static inline void huge_ptep_invalidate(struct mm_struct *mm,
                                        unsigned long address, pte_t *ptep)
{
        pmd_t *pmdp = (pmd_t *) ptep;

        if (!MACHINE_HAS_IDTE) {
                __pmd_csp(pmdp);
                if (mm->context.noexec) {
                        pmdp = get_shadow_table(pmdp);
                        __pmd_csp(pmdp);
                }
                return;
        }

        __pmd_idte(address, pmdp);
        if (mm->context.noexec) {
                pmdp = get_shadow_table(pmdp);
                __pmd_idte(address, pmdp);
        }
}
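
/*
 * The old segment entry may still be cached in the TLB, so the entry is
 * invalidated first and the updated one is only then installed via
 * set_huge_pte_at().
 */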
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({                                                                          \
        int __changed = !pte_same(huge_ptep_get(__ptep), __entry);          \
        if (__changed) {                                                    \
                huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);       \
                set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
        }                                                                   \
        __changed;                                                          \
})
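
/*
 * Mirrors the optimization in ptep_set_wrprotect(): the flush can be
 * skipped when the mm is only in use by the current task on this CPU,
 * since no other context can then hold a stale writable TLB entry.
 */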
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                   \
({                                                                      \
        pte_t __pte = huge_ptep_get(__ptep);                            \
        if (pte_write(__pte)) {                                         \
                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
                    (__mm) != current->active_mm)                       \
                        huge_ptep_invalidate(__mm, __addr, __ptep);     \
                set_huge_pte_at(__mm, __addr, __ptep,                   \
                                huge_pte_wrprotect(__pte));             \
        }                                                               \
})

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */