pgtable_64.h

#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

#include <linux/const.h>
#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/pda.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD	0

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512
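/*
 * With the shifts above, a 48-bit virtual address is decoded by the
 * four-level walk as follows (bits 48-63 are a sign extension of bit 47):
 *
 *	47         39 38         30 29         21 20         12 11         0
 *	+------------+------------+------------+------------+-------------+
 *	| PGD index  | PUD index  | PMD index  | PTE index  | page offset |
 *	+------------+------------+------------+------------+-------------+
 *
 * Each index selects one of 512 entries at its level (9 bits per level),
 * which is why PTRS_PER_{PGD,PUD,PMD,PTE} are all 512.
 */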
#ifndef __ASSEMBLY__

#define pte_ERROR(e)					\
	printk("%s:%d: bad pte %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	printk("%s:%d: bad pmd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	printk("%s:%d: bad pud %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	printk("%s:%d: bad pgd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))
#define pgd_none(x)	(!pgd_val(x))
#define pud_none(x)	(!pud_val(x))

struct mm_struct;

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}
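/*
 * Note on native_ptep_get_and_clear(): on SMP the pte is read and zeroed in
 * one step with an atomic xchg(), so that accessed/dirty bits the CPU may
 * set in the entry concurrently are not lost between a separate load and
 * store.  On UP no other CPU can update the entry, so the simple
 * read-then-clear sequence above is sufficient.
 */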
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

#define pte_same(a, b)	((a).pte == (b).pte)

#endif /* !__ASSEMBLY__ */
#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

#define MAXMEM		 _AC(0x00003fffffffffff, UL)
#define VMALLOC_START	 _AC(0xffffc20000000000, UL)
#define VMALLOC_END	 _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START	 _AC(0xffffe20000000000, UL)
#define MODULES_VADDR	 _AC(0xffffffffa0000000, UL)
#define MODULES_END	 _AC(0xfffffffffff00000, UL)
#define MODULES_LEN	 (MODULES_END - MODULES_VADDR)
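/*
 * With the shifts above, a single entry covers 2 MB at the pmd level
 * (PMD_SIZE), 1 GB at the pud level (PUD_SIZE) and 512 GB at the pgd level
 * (PGDIR_SIZE).  The _AC() constants above carve up the kernel half of the
 * address space: the vmalloc arena, the virtually mapped memmap (vmemmap)
 * and the module mapping area near the top of the address space.
 */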
#ifndef __ASSEMBLY__

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
}

#define pte_none(x)	(!pte_val((x)))
#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */
#define pte_page(x)	pfn_to_page(pte_pfn((x)))
#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)					\
	(__pgprot(pgprot_val((prot)) | _PAGE_PCD | _PAGE_PWT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
#define pgd_page_vaddr(pgd)						\
	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_MASK))
#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index((address)))
#define pgd_offset_k(address)	(init_level4_pgt + pgd_index((address)))
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address)	((pgd_t){ (address) | _KERNPG_TABLE })

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud)						\
	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)		(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgd, address)					\
	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
#define pud_present(pud)	(pud_val((pud)) & _PAGE_PRESENT)

static inline int pud_large(pud_t pte)
{
	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}
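/*
 * A pud entry with both _PAGE_PRESENT and _PAGE_PSE set maps a 1 GB page
 * directly, with no pmd or pte level underneath it; pud_large() is how
 * callers walking the tree detect that case.
 */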
/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val((pmd)) & PTE_MASK))
#define pmd_page(pmd)		(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
				  pmd_index(address))
#define pmd_none(x)		(!pmd_val((x)))
#define pmd_present(x)		(pmd_val((x)) & _PAGE_PRESENT)
#define pfn_pmd(nr, prot)	(__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
#define pmd_pfn(x)		((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte)	((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
					   _PAGE_FILE })
#define PTE_FILE_MAX_BITS	__PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))

#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
					 pte_index((address)))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address)		pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address)	pte_offset_kernel((dir), (address))
#define pte_unmap(pte)				/* NOP */
#define pte_unmap_nested(pte)			/* NOP */

#define update_mmu_cache(vma, address, pte)	do { } while (0)
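/*
 * Taken together, the per-level accessors above are used to walk the tree
 * top-down.  An illustrative lookup of the pte backing a kernel virtual
 * address (the helper name is hypothetical, not part of this header) could
 * look like:
 *
 *	static pte_t *example_lookup_kernel_pte(unsigned long address)
 *	{
 *		pgd_t *pgd = pgd_offset_k(address);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (!pgd_present(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, address);
 *		if (!pud_present(*pud) || pud_large(*pud))
 *			return NULL;	// hole, or 1 GB page with no pte
 *		pmd = pmd_offset(pud, address);
 *		if (!pmd_present(*pmd))
 *			return NULL;
 *		if (pmd_val(*pmd) & _PAGE_PSE)
 *			return NULL;	// 2 MB page, no pte level
 *		return pte_offset_kernel(pmd, address);
 *	}
 */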
extern int direct_gbpages;

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
							 ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
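/*
 * Layout of a swap entry as encoded above: bit 0 is left clear so a
 * swapped-out pte never tests as _PAGE_PRESENT, the swap type occupies
 * bits 1-6 (up to 64 swap areas), and the offset within the swap area
 * starts at bit 8.
 */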
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP	1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o)				\
	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1)))	\
	 ? ((o) | ~__VIRTUAL_MASK)			\
	 : (o))
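/*
 * kc_vaddr_to_offset()/kc_offset_to_vaddr() convert between canonical
 * kernel virtual addresses and /proc/kcore offsets: the first strips the
 * sign-extension bits above __VIRTUAL_MASK_SHIFT, the second restores them
 * whenever the top retained bit is set, yielding a canonical address again.
 */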
#define __HAVE_ARCH_PTE_SAME

#endif /* !__ASSEMBLY__ */

#endif /* _X86_64_PGTABLE_H */