/* arch/s390/include/asm/hugetlb.h */
  1. /*
  2. * IBM System z Huge TLB Page Support for Kernel.
  3. *
  4. * Copyright IBM Corp. 2008
  5. * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  6. */
  7. #ifndef _ASM_S390_HUGETLB_H
  8. #define _ASM_S390_HUGETLB_H
  9. #include <asm/page.h>
  10. #include <asm/pgtable.h>
/*
 * s390 has no address range reserved exclusively for huge pages, and
 * the generic pgd freeing logic works for hugetlb mappings as well.
 */
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

/* Install a huge pte (segment-level entry); implemented in arch mm code. */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
  15. /*
  16. * If the arch doesn't supply something else, assume that hugepage
  17. * size aligned regions are ok without further preparation.
  18. */
  19. static inline int prepare_hugepage_range(struct file *file,
  20. unsigned long addr, unsigned long len)
  21. {
  22. if (len & ~HPAGE_MASK)
  23. return -EINVAL;
  24. if (addr & ~HPAGE_MASK)
  25. return -EINVAL;
  26. return 0;
  27. }
/* No per-mm prefault setup is needed on s390. */
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

/* Per-page allocation/release hooks; implemented in arch mm code. */
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
  31. static inline pte_t huge_pte_wrprotect(pte_t pte)
  32. {
  33. pte_val(pte) |= _PAGE_RO;
  34. return pte;
  35. }
  36. static inline int huge_pte_none(pte_t pte)
  37. {
  38. return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
  39. !(pte_val(pte) & _SEGMENT_ENTRY_RO);
  40. }
/*
 * Read a huge pte.  With hardware large-page support (MACHINE_HAS_HPAGE)
 * the segment-table entry itself acts as the huge pte.  Without it, the
 * segment entry holds the origin of a software page table: fetch the
 * first pte from there, mark it huge, and merge back the INV/RO status
 * bits that are kept in the segment entry.
 */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/* Reuse ptep to point at the software page table's first pte. */
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			/* Carry the segment entry's INV/RO bits into the result. */
			mask = pte_val(pte) &
			       (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}
/*
 * Invalidate a segment-table entry with COMPARE AND SWAP AND PURGE
 * (csp), then leave it in the canonical empty/invalid state.
 * csp requires its operands in an even/odd register pair, hence the
 * fixed register bindings below.
 */
static inline void __pmd_csp(pmd_t *pmdp)
{
	/* old value to compare against */
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	/* new value: same entry with the invalid bit set */
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	/* NOTE(review): +5 adjusts the operand address for csp's
	 * word-operand encoding — confirm against the z/Architecture POP. */
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		" csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
/*
 * Invalidate a segment-table entry with IDTE (emitted as a raw .insn
 * since the assembler may not know the mnemonic).  IDTE needs the
 * segment-table origin and the virtual address covered by the entry.
 * Afterwards the entry is left in the canonical empty/invalid state.
 */
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	/* Recover the segment-table origin from the entry's own address. */
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	/* Only issue idte for entries that are not already invalid. */
	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			" .insn rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
  82. static inline void huge_ptep_invalidate(struct mm_struct *mm,
  83. unsigned long address, pte_t *ptep)
  84. {
  85. pmd_t *pmdp = (pmd_t *) ptep;
  86. if (MACHINE_HAS_IDTE)
  87. __pmd_idte(address, pmdp);
  88. else
  89. __pmd_csp(pmdp);
  90. }
  91. static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
  92. unsigned long addr, pte_t *ptep)
  93. {
  94. pte_t pte = huge_ptep_get(ptep);
  95. huge_ptep_invalidate(mm, addr, ptep);
  96. return pte;
  97. }
/*
 * Update a huge pte when it differs from @__entry: invalidate the old
 * entry, then install the new one.  The statement expression evaluates
 * to non-zero when the pte was changed.  (@__dirty is unused here.)
 */
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									\
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	\
	if (__changed) {						\
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
	}								\
	__changed;							\
})
/*
 * Make a writable huge pte read-only: invalidate the entry, then
 * re-install it with the write-protect bit set.  Entries that are
 * already read-only are left untouched.
 */
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		huge_ptep_invalidate(__mm, __addr, __ptep);		\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})
  116. static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
  117. unsigned long address, pte_t *ptep)
  118. {
  119. huge_ptep_invalidate(vma->vm_mm, address, ptep);
  120. }
  121. #endif /* _ASM_S390_HUGETLB_H */