hugetlb.h

/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

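/*
 * With HPAGE_SHIFT == 20 (1 MB hugepages on s390), HPAGE_MASK is
 * ~0xfffffUL, so e.g. len == 0x180000 fails the check above
 * (len & ~HPAGE_MASK == 0x80000) while any 1 MB multiple passes.
 */
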
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

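/*
 * A huge pte on s390 is really a segment-table (pmd) entry, so the
 * helpers below test _SEGMENT_ENTRY_* bits: an entry only counts as
 * none if it is invalid and the software RO bit is not set.
 */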
static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
	       !(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

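/*
 * Without hardware large pages (!MACHINE_HAS_HPAGE) hugepages are
 * emulated: the segment entry points to a page table, and the
 * effective huge pte is rebuilt from that table's first entry plus
 * the invalid/read-only bits kept in the segment entry itself.
 */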
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
			       (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	mm->context.flush_mm = 1;
	pmd_clear((pmd_t *) ptep);
	return pte;
}

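/*
 * CSP (compare and swap and purge) atomically replaces the segment
 * entry with an invalidated copy and purges matching TLB entries on
 * all CPUs.  It is the fallback for machines without IDTE.
 */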
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

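/*
 * IDTE (invalidate DAT table entry, opcode 0xb98e, emitted via
 * .insn) invalidates the segment entry and flushes the associated
 * TLB entries on all CPUs.  It takes the segment-table origin,
 * recomputed here from the pmd pointer and the address.
 */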
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

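/*
 * Invalidate a huge pte: use IDTE if available, CSP otherwise.  If
 * the mm emulates execute protection (context.noexec), the entry in
 * the shadow segment table must be invalidated as well.
 */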
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (!MACHINE_HAS_IDTE) {
		__pmd_csp(pmdp);
		if (mm->context.noexec) {
			pmdp = get_shadow_table(pmdp);
			__pmd_csp(pmdp);
		}
		return;
	}

	__pmd_idte(address, pmdp);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		__pmd_idte(address, pmdp);
	}
}

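/*
 * A valid segment-table entry must not simply be overwritten: it is
 * invalidated first, then the new pte is stored, so that no stale
 * TLB entry can survive the update.
 */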
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									     \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	     \
	if (__changed) {						     \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	     \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);    \
	}								     \
	__changed;							     \
})

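/*
 * Write protection only needs an immediate flush if the mm is
 * attached on another CPU or is not the current mm; otherwise the
 * flush is deferred via the flush_mm flag.
 */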
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */