pgtable-3level.h

#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pud_none(pud)		0
#define pud_bad(pud)		0
#define pud_present(pud)	1

/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it. -ben
 */
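/*
 * A PAE pte is 64 bits wide and a plain store cannot update it
 * atomically, so the high word is written first: the entry only
 * becomes visible to the hardware walker once the low word, which
 * carries the present bit, is written last.
 */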
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
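/*
 * The atomic variants below use set_64bit() (cmpxchg8b on i386), so the
 * whole 64-bit entry is installed in one shot and no intermediate
 * not-present state is ever visible.
 */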
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}
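/* Same rule for pmds: the low 32-bit word holds the present bit, so it is cleared first. */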
static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pud_clear(pud_t *pud) { }

#define pud_page(pud) \
	((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
	((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
				  pmd_index(address))

#ifdef CONFIG_SMP
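/*
 * On SMP the low word is cleared with an atomic xchg: this returns the
 * final low word (including any dirty/accessed bits another CPU's
 * hardware walk may have set) and makes the entry non-present, so the
 * high word can then be read and cleared without the hardware updating
 * the entry underneath us.
 */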
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}
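/* Mask off the NX bit (bit 63) so it does not leak into the page frame number. */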
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS       32
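/*
 * A nonlinear file pte therefore looks like { .pte_low = _PAGE_FILE,
 * .pte_high = pgoff }: not present (bit 0 of pte_low is clear), but
 * not pte_none() either, with the full 32-bit file offset recoverable
 * from the high word.
 */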
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
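/*
 * The swap entry also lives entirely in pte_high: the swap type takes
 * the low 5 bits and the swap offset the remaining 27.  pte_low stays
 * zero, so a swapped-out pte is neither present nor mistaken for a
 * file pte.
 */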
#define __pmd_free_tlb(tlb, x)		do { } while (0)

#endif /* _I386_PGTABLE_3LEVEL_H */