pgtable-3level.h
#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
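
/*
 * Illustrative sketch, not from the original header: the store order in
 * native_set_pte() above is what keeps a half-written pte harmless.  The
 * present bit lives in pte_low, so while only pte_high has been stored
 * the hardware still sees a not-present entry and cannot walk it:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	// pfn_pte(): assumed helper
 *	native_set_pte(ptep, pte);	// high word, wmb, then low word
 */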

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm,
					  unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
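
/*
 * Illustrative timeline for native_set_pte_present(), not from the
 * original header.  A concurrent hardware walk can observe any of three
 * states, each of them safe:
 *
 *	after "ptep->pte_low = 0":	not present, the walker faults
 *	after pte_high is stored:	still not present (P bit is in pte_low)
 *	after pte_low is stored:	the complete new pte is visible
 */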

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
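
/*
 * Sketch, not from the original header: set_64bit() performs a single
 * atomic 64-bit store (built around cmpxchg8b on i386), so unlike the
 * split stores in native_set_pte() these helpers never expose a
 * half-written entry, even to a concurrent hardware walk:
 *
 *	native_set_pmd(pmdp, pfn_pmd(pfn, pgprot));	// pfn_pmd(): assumed helper
 */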

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void pud_clear(pud_t *pudp)
{
	unsigned long pgd;

	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Make sure the pud entry we're updating is within the
	 * current pgd to avoid unnecessary TLB flushes.
	 */
	pgd = read_cr3();
	if (__pa(pudp) >= pgd && __pa(pudp) <
	    (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
		write_cr3(pgd);
}
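
/*
 * Worked example for the check above, not from the original header:
 * read_cr3() yields the physical base of the current PAE pgd (the
 * 4-entry PDPT).  With PTRS_PER_PGD == 4 and sizeof(pgd_t) == 8 the
 * window is only 32 bytes, so cr3 is reloaded exactly when the cleared
 * pud really is one of the four entries the CPU is currently using.
 */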

#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
				  pmd_index(address))
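
/*
 * Sketch of a full walk using the helper above, not from the original
 * header; pgd_offset() and pud_offset() are assumed from the generic
 * pagetable headers:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);	// indexes into pud_page(*pud)
 */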

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
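
/*
 * Note on the SMP variant above, not from the original header: xchg()
 * is an implicitly locked operation, so it atomically clears the low
 * word (including the present bit) and orders the reads that follow.
 * Once the present bit is gone the hardware will not set accessed or
 * dirty bits in the entry, so pte_high can then be read and cleared
 * without tearing:
 *
 *	pte_t old = native_ptep_get_and_clear(ptep);
 *	// old holds the complete previous pte; *ptep now reads as zero
 */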

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS	32
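
/*
 * Worked example, not from the original header: a nonlinear file pte
 * for page offset 0x1234 is stored as pte_low == _PAGE_FILE (present
 * bit clear) and pte_high == 0x1234.  pte_to_pgoff() just reads the
 * high word back, which is why the full 32 bits are available and
 * PTE_FILE_MAX_BITS can be 32.
 */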

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
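
/*
 * Worked example, not from the original header: __swp_entry(0x12, 0x345)
 * packs to (0x345 << 5) | 0x12 == 0x68b2 and lands in pte_high, with
 * pte_low == 0, so the entry is neither present nor a file pte;
 * __swp_type() masks the low 5 bits back out and __swp_offset() shifts
 * them away.
 */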

#endif /* _I386_PGTABLE_3LEVEL_H */