/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include "linux/sched.h"
#include "linux/linkage.h"
#include "asm/processor.h"
#include "asm/page.h"
#include "asm/fixmap.h"

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
			     pte_t *pte_out);

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/*
 * pgd entries used up by user/kernel:
 */
#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
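
/*
 * Illustrative only (the actual values are configuration-dependent):
 * with an i386-style two-level layout where PGDIR_SHIFT is 22 and
 * PTRS_PER_PGD is 1024, a TASK_SIZE of 0xc0000000 would give
 *
 *	USER_PGD_PTRS   = 0xc0000000 >> 22 = 768
 *	KERNEL_PGD_PTRS = 1024 - 768       = 256
 *
 * i.e. the low 768 pgd slots cover user space and the rest belong to
 * the kernel.
 */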
#ifndef __ASSEMBLY__

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
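
/*
 * A worked example of the VMALLOC_START rounding, assuming
 * VMALLOC_OFFSET is the usual 8MB (0x800000): with
 * end_iomem == 0x1f123456,
 *
 *	(0x1f123456 + 0x800000) & ~0x7fffff == 0x1f800000
 *
 * so the vmalloc area starts at the next 8MB boundary past end_iomem,
 * leaving an unmapped hole of up to 8MB in between.
 */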
#define REGION_SHIFT	(sizeof(pte_t) * 8 - 4)
#define REGION_MASK	(((unsigned long) 0xf) << REGION_SHIFT)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
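
/*
 * How these maps are consumed (a sketch; the lookup itself lives in
 * generic mm code via the protection_map[] table, not in this file):
 * __P{r}{w}{x} is indexed for private mappings and __S{r}{w}{x} for
 * shared ones. For example, a private PROT_READ|PROT_WRITE mapping
 * selects __P011 == PAGE_COPY, a read-only user PTE that triggers
 * copy-on-write on the first store, while the shared equivalent
 * __S011 == PAGE_SHARED is directly writable.
 */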
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
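
/*
 * Note on _PAGE_PROTNONE (an explanatory aside, not new semantics): a
 * PROT_NONE page has _PAGE_PRESENT clear so every access faults, but
 * _PAGE_PROTNONE set so that pte_present() above still reports it as
 * present and it is never mistaken for a swap entry. Assuming the
 * pfn_pte() helper from the 2-/3-level headers:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_NONE);
 *	// pte_present(pte) is true; pte_read()/pte_write() are false
 */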
/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
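
/*
 * Background on the _PAGE_NEW* software bits (a sketch of how UML
 * appears to use them; the flush code lives elsewhere, e.g. fix_range()
 * in the tlb code): UML mirrors its page tables into host mmap() /
 * munmap() / mprotect() calls. _PAGE_NEWPAGE marks a PTE whose host
 * mapping must be created or torn down, while _PAGE_NEWPROT marks one
 * whose host protections merely need an mprotect(). A TLB flush walks
 * the range, issues the host calls, and then clears the bits with
 * pte_mkuptodate() below.
 */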
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */
	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
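
/*
 * Usage sketch (hypothetical call sites, not from this file): every
 * PTE store funnels through set_pte()/set_pte_at(), including swap
 * entries, so the unmap-on-flush marking above cannot be bypassed:
 *
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));
 *	// non-present: marked NEWPAGE only, so fix_range() unmaps it
 *
 * where swp_entry_to_pte() is the generic wrapper around
 * __swp_entry_to_pte() defined near the end of this file.
 */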
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))

#define mk_pte(page, pgprot) \
	({ pte_t pte; \
	 \
	pte_set_val(pte, page_to_phys(page), (pgprot)); \
	if (pte_present(pte)) \
		pte = pte_mknewprot(pte_mknewpage(pte)); \
	pte;})
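
/*
 * Typical use (a sketch; the real callers live in generic mm code):
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 * which yields a PTE pointing at 'page' with the vma's protections,
 * already flagged NEWPAGE/NEWPROT for the next host-side flush.
 */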
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
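
/*
 * pte_modify() is what mprotect() ultimately applies (illustrative
 * call only):
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// drop write permission
 *
 * _PAGE_CHG_MASK preserves the physical address plus the accessed and
 * dirty bits; all other protection bits are replaced by newprot.
 */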
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
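
/*
 * Putting the pieces together (a sketch of a read-only walk; real
 * callers also hold the page table lock and check the *_none()/*_bad()
 * predicates at each level):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pud_offset() is the fold-through from asm-generic/pgtable-nopud.h,
 * included below.
 */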
#define update_mmu_cache(vma,address,pte) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)		(((x).val >> 4) & 0x3f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
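
/*
 * Layout sketch of a non-present (swapped-out) PTE as encoded above:
 *
 *	bits 0-3 : _PAGE_PRESENT (clear), _PAGE_NEWPAGE, _PAGE_NEWPROT,
 *		   _PAGE_FILE
 *	bits 4-9 : swap type (0x3f mask, so up to 64 swap areas)
 *	bits 11+ : swap offset
 *
 * so a round trip preserves both fields:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	// __swp_type(e) == 3 and __swp_offset(e) == 0x1234
 */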
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

#include <asm-generic/pgtable-nopud.h>

#endif
#endif

#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */