#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

#include <linux/const.h>
#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/pda.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
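
/*
 * __supported_pte_mask masks off pte feature bits the CPU does not
 * implement (most notably _PAGE_NX when the NX bit is unavailable);
 * pfn_pte() and pte_modify() below filter every pte they build
 * through it.
 */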
extern unsigned long __supported_pte_mask;

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512
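
/*
 * Worked out: with 4KB pages (PAGE_SHIFT == 12) and 512 entries per
 * level, each pte maps 4KB, each pmd entry 2MB (1 << 21), each pud
 * entry 1GB (1 << 30), and each pgd entry 512GB (1 << 39); four
 * levels of 9 index bits each plus the 12-bit page offset cover a
 * 48-bit virtual address.
 */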

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)	(!pgd_val(x))
#define pud_none(x)	(!pud_val(x))
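
/*
 * On x86-64 every page table entry is a single 64-bit word, so the
 * setters below can use plain stores; there is no split-word update
 * problem as with 32-bit PAE.
 */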
static inline void set_pte(pte_t *dst, pte_t val)
{
	pte_val(*dst) = pte_val(val);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline void set_pmd(pmd_t *dst, pmd_t val)
{
	*dst = val;
}

static inline void set_pud(pud_t *dst, pud_t val)
{
	*dst = val;
}

static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}

static inline void set_pgd(pgd_t *dst, pgd_t val)
{
	*dst = val;
}

static inline void pgd_clear(pgd_t *pgd)
{
	set_pgd(pgd, __pgd(0));
}

#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))

struct mm_struct;
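
/*
 * When 'full' is set the caller is tearing down the whole address
 * space, so nothing can race with the update and the atomic xchg
 * above can be skipped in favour of a plain load and store.
 */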
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		pte = *ptep;
		*ptep = __pte(0);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define pte_same(a, b)	((a).pte == (b).pte)

#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))

#endif /* !__ASSEMBLY__ */

#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define MAXMEM		_AC(0x3fffffffffff, UL)
#define VMALLOC_START	_AC(0xffffc20000000000, UL)
#define VMALLOC_END	_AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START	_AC(0xffffe20000000000, UL)
#define MODULES_VADDR	_AC(0xffffffff88000000, UL)
#define MODULES_END	_AC(0xfffffffffff00000, UL)
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)
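
/*
 * For scale: MAXMEM allows 2^46 bytes (64TB) of physical memory, the
 * vmalloc window above spans 2^45 bytes (32TB), and MODULES_LEN works
 * out to just under 2GB at the top of the address space.
 */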

#ifndef __ASSEMBLY__
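
/*
 * An entry is "bad" if any bit outside the physical frame number and
 * the expected kernel table flags (_KERNPG_TABLE | _PAGE_USER) is
 * set; a nonzero result here is what the *_ERROR() macros above get
 * used to report.
 */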
static inline unsigned long pgd_bad(pgd_t pgd)
{
	return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

static inline unsigned long pud_bad(pud_t pud)
{
	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

static inline unsigned long pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
#define pte_page(x)	pfn_to_page(pte_pfn(x))
#define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (page_nr << PAGE_SHIFT);
	pte_val(pte) |= pgprot_val(pgprot);
	pte_val(pte) &= __supported_pte_mask;
	return pte;
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_PSE; }
static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }

struct vm_area_struct;
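
/*
 * Testing pte_young() first keeps the common already-old case free of
 * the locked bit operation; only a set accessed bit pays for the
 * atomic test_and_clear.
 */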
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte);
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))

static inline int pmd_large(pmd_t pte) {
	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
#define pgd_page_vaddr(pgd)	((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
#define pgd_page(pgd)		(pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(address)	(init_level4_pgt + pgd_index(address))
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address)	((pgd_t){ (address) | _KERNPG_TABLE })

/* PUD - Level 3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud)	((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
#define pud_page(pud)		(pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
#define pud_present(pud)	(pud_val(pud) & _PAGE_PRESENT)

/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
				  pmd_index(address))
#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x)	((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)

#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Change flags of a PTE */
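/*
 * pte_modify() keeps the bits in _PAGE_CHG_MASK (the pfn plus the
 * accessed and dirty bits) and replaces everything else with the new
 * protection, again filtered through __supported_pte_mask.
 */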
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	pte_val(pte) &= __supported_pte_mask;
	return pte;
}

#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
					 pte_index(address))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */
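
/*
 * A minimal sketch of how the per-level accessors above compose into
 * a software walk of the kernel page tables; kvaddr_to_pte() is a
 * hypothetical helper for illustration only, not part of this header
 * or of any kernel API.
 */
static inline pte_t *kvaddr_to_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* level 4 */
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);		/* level 3 */
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);		/* level 2 */
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		return NULL;	/* hole, or 2MB page with no level 1 */
	return pte_offset_kernel(pmd, address);	/* level 1 */
}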

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __dirty) {					\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __address);			\
	}								\
	__changed;							\
})

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
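
/*
 * Layout implied by the macros above: bit 0 stays clear so a swap
 * entry is never _PAGE_PRESENT, the swap type lives in bits 1-6
 * (64 types) and the swap offset starts at bit 8; bit 7 is left
 * untouched, presumably to keep clear of _PAGE_PROTNONE.
 */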

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern int kern_addr_valid(unsigned long addr);

pte_t *lookup_address(unsigned long addr);

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) \
	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
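
/*
 * kc_offset_to_vaddr() sign-extends: a kcore offset with bit
 * (__VIRTUAL_MASK_SHIFT - 1) set is mapped back into the canonical
 * upper half of the address space by setting all bits above the mask.
 */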

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */
#endif /* _X86_64_PGTABLE_H */