#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
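/*
 * Usage sketch (illustrative, not part of the original header): read-only
 * faults on anonymous memory can map this one shared page instead of
 * allocating a fresh one, and callers can test for it by pointer
 * comparison:
 *
 *      if (page == ZERO_PAGE(addr))
 *              return 0;       (hypothetical caller: nothing to copy)
 */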
extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
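/*
 * Worked example (assuming the common __PAGE_OFFSET of 0xC0000000):
 * 0xC0000000 >> 22 = 768, so the boot-time page directory has 768 user
 * entries and 1024 - 768 = 256 kernel entries.
 */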
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
                        2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
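/*
 * Worked example (hypothetical values): with high_memory at 0xC8000000
 * (128MB of lowmem) and vmalloc_earlyreserve of 0, VMALLOC_START is
 * (0xC8000000 + 2*8MB - 1) & ~(8MB - 1) = 0xC8800000, i.e. the vmalloc
 * area starts a full 8MB-aligned hole above the end of physical memory.
 */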
/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#define _PAGE_BIT_USER 2
#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_ACCESSED 5
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_PSE 7		/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL 8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9	/* available for programmer */
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_NX 63

#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080		/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL 0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1 0x200	/* available for programmer */
#define _PAGE_UNUSED2 0x400
#define _PAGE_UNUSED3 0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE 0x040	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE 0x080	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX 0
#endif

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
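/*
 * Note (illustrative): generic mm code builds protection_map[] from these
 * sixteen entries.  The index is the low three mmap prot bits (read,
 * write, exec) plus bit 3 for a shared mapping, so a PROT_READ|PROT_WRITE
 * private mapping gets __P011 == PAGE_COPY (copy-on-write), while the
 * shared equivalent gets __S011 == PAGE_SHARED.
 */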
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= __LARGE_PTE; return pte; }
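/*
 * Usage sketch (illustrative): these helpers take and return a pte_t by
 * value, so they compose freely and touch the page table only when the
 * result is written back, e.g. in a fork-style copy path:
 *
 *      pte_t pte = *src_ptep;
 *      pte = pte_wrprotect(pte_mkclean(pte));
 *      set_pte_at(mm, addr, dst_ptep, pte);
 */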
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

/*
 * The pte is tested first so the common already-clean/already-old case
 * avoids a locked bit operation entirely.
 */
static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
}

static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		pte = *ptep;
		*ptep = __pte(0);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
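/*
 * Note (illustrative): the "full" variant is used by zap paths tearing
 * down an entire address space; the caller holds the page table lock and
 * flushes the TLB wholesale afterwards, so a plain non-atomic load/store
 * suffices there.  The !full case defers to ptep_get_and_clear(), which
 * must be atomic so that hardware Dirty/Accessed updates racing on
 * another CPU are not lost.
 */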
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
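/*
 * Usage sketch (illustrative): a new process pgd shares the kernel half
 * of the address space by copying the kernel entries from the reference
 * swapper_pg_dir:
 *
 *      clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *                      swapper_pg_dir + USER_PTRS_PER_PGD,
 *                      KERNEL_PGD_PTRS);
 */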
/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3)	\
	? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}
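/*
 * Usage sketch (illustrative): mprotect-style code changes the protection
 * of an existing mapping while keeping the page frame number and the
 * Accessed/Dirty state (everything covered by _PAGE_CHG_MASK):
 *
 *      pte_t pte = *ptep;
 *      pte = pte_modify(pte, PAGE_READONLY);
 *      set_pte_at(mm, addr, ptep, pte);
 */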
#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
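/*
 * Usage sketch (a minimal walk, assuming this kernel generation's folded
 * upper levels on i386, where pud_offset()/pmd_offset() largely compile
 * away):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */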
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif

extern void noexec_setup(const char *str);

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
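/*
 * Usage sketch (illustrative): with CONFIG_HIGHPTE the pte page may live
 * in highmem, so every map must be paired with the matching unmap, and
 * the _nested variants exist so two pte pages (e.g. dst and src during
 * fork) can be mapped at once on distinct atomic kmap slots:
 *
 *      pte_t *dst_pte = pte_offset_map(dst_pmd, addr);
 *      pte_t *src_pte = pte_offset_map_nested(src_pmd, addr);
 *      ...
 *      pte_unmap_nested(src_pte);
 *      pte_unmap(dst_pte);
 */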
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 *
 * Also, we only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								  \
		if (__dirty) {						  \
			(__ptep)->pte_low = (__entry).pte_low;		  \
			flush_tlb_page(__vma, __address);		  \
		}							  \
	} while (0)

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr) (1)
#endif /* CONFIG_FLATMEM */

#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */