/*
 * linux/include/asm-xtensa/pgtable.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * We only use two ring levels, user and kernel space.
 */
#define USER_RING	1	/* user ring level */
#define KERNEL_RING	0	/* kernel ring level */
/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 * PGD table (page directory), i.e. 3rd-level page table:
 *	One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *	(Architectures that don't have the PMD folded point to the PMD tables)
 *
 *	The pointer to the PGD table for a given task can be retrieved from
 *	the task structure (struct task_struct*) t, e.g. current():
 *	  (t->mm ? t->mm : t->active_mm)->pgd
 *
 * PMD tables (page middle-directory), i.e. 2nd-level page tables:
 *	Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 * PTE tables (page table entry), i.e. 1st-level page tables:
 *	One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
 *	invalid_pte_table for absent mappings.
 *
 * The individual pages are 4 kB in size, with a special page of zeroes
 * (empty_zero_page) used for zero-filled mappings.
 */
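/*
 * Illustrative sketch (not part of the original header): with 4 kB pages
 * (PAGE_SHIFT == 12) and PGDIR_SHIFT == 22 (defined just below), a 32-bit
 * virtual address splits into a PGD index, a PTE index and a page offset.
 * For the hypothetical address 0x12345678:
 *
 *	pgd index =  0x12345678 >> 22              = 0x048  (entry  72 of 1024)
 *	pte index = (0x12345678 >> 12) & 0x3ff     = 0x345  (entry 837 of 1024)
 *	offset    =  0x12345678 & (PAGE_SIZE - 1)  = 0x678
 */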
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PTE_SHIFT	10
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		1024
#define PGD_ORDER		0
#define PMD_ORDER		0
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/*
 * Virtual memory area. We keep a distance to other memory regions to be
 * on the safe side. We also use this area for cache aliasing.
 */
// FIXME: virtual memory area must be configuration-dependent

#define VMALLOC_START	0xC0000000
#define VMALLOC_END	0xC7FF0000
/*
 * Xtensa Linux config PTE layout (when present):
 *	31-12:	PPN
 *	11-6:	software
 *	5-4:	RING
 *	3-0:	CA
 *
 * Similar to the Alpha and MIPS ports, we need to keep track of the ref
 * and mod bits in software. We have a software "you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page.
 *
 * See further below for the PTE layout of swapped-out pages.
 */
#define _PAGE_VALID	(1<<0)	/* hardware: page is accessible */
#define _PAGE_WRENABLE	(1<<1)	/* hardware: page is writable */

/* None of these cache modes include MP coherency: */
#define _PAGE_NO_CACHE	(0<<2)	/* bypass, non-speculative */
#if XCHAL_DCACHE_IS_WRITEBACK
# define _PAGE_WRITEBACK (1<<2)	/* write back */
# define _PAGE_WRITETHRU (2<<2)	/* write through */
#else
# define _PAGE_WRITEBACK (1<<2)	/* assume write through */
# define _PAGE_WRITETHRU (1<<2)
#endif
#define _PAGE_NOALLOC	(3<<2)	/* don't allocate cache, if not cached */
#define _CACHE_MASK	(3<<2)

#define _PAGE_USER	(1<<4)	/* user access (ring=1) */
#define _PAGE_KERNEL	(0<<4)	/* kernel access (ring=0) */

/* Software */
#define _PAGE_RW	(1<<6)	/* software: page writable */
#define _PAGE_DIRTY	(1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED	(1<<8)	/* software: page accessed (read) */
#define _PAGE_FILE	(1<<9)	/* nonlinear file mapping */

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY)
#define _PAGE_PRESENT	(_PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)
#ifdef CONFIG_MMU
# define PAGE_NONE	__pgprot(_PAGE_PRESENT)
# define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
# define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
# define PAGE_INVALID	__pgprot(_PAGE_USER)

# if (DCACHE_WAY_SIZE > PAGE_SIZE)
#  define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL)
# else
#  define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL)
# endif
#else /* no mmu */
# define PAGE_NONE	__pgprot(0)
# define PAGE_SHARED	__pgprot(0)
# define PAGE_COPY	__pgprot(0)
# define PAGE_READONLY	__pgprot(0)
# define PAGE_KERNEL	__pgprot(0)
#endif
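/*
 * Illustrative sketch (not part of the original header): a shared, writable,
 * write-back cached user page at the hypothetical pfn 0x1000 would be built
 * by pfn_pte()/mk_pte() (defined further below) roughly as
 *
 *	pte_val = (0x1000 << PAGE_SHIFT) | pgprot_val(PAGE_SHARED)
 *		= 0x01000000
 *		| _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED	(0x105)
 *		| _PAGE_USER  | _PAGE_RW				(0x050)
 *		= 0x01000155
 *
 * i.e. the PPN lives in bits 31..12 and the ring/cache/software bits in
 * bits 11..0.
 */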
/*
 * On certain configurations of the Xtensa MMU (e.g. the initial Linux
 * config), the MMU can't do page protection for execute, and considers that
 * the same as read. Also, write permissions may imply read permissions.
 * What follows is the closest we can get by reasonable means..
 * See linux/mm/mmap.c for the protection_map[] array that uses these
 * definitions.
 */
#define __P000	PAGE_NONE	/* private --- */
#define __P001	PAGE_READONLY	/* private --r */
#define __P010	PAGE_COPY	/* private -w- */
#define __P011	PAGE_COPY	/* private -wr */
#define __P100	PAGE_READONLY	/* private x-- */
#define __P101	PAGE_READONLY	/* private x-r */
#define __P110	PAGE_COPY	/* private xw- */
#define __P111	PAGE_COPY	/* private xwr */

#define __S000	PAGE_NONE	/* shared  --- */
#define __S001	PAGE_READONLY	/* shared  --r */
#define __S010	PAGE_SHARED	/* shared  -w- */
#define __S011	PAGE_SHARED	/* shared  -wr */
#define __S100	PAGE_READONLY	/* shared  x-- */
#define __S101	PAGE_READONLY	/* shared  x-r */
#define __S110	PAGE_SHARED	/* shared  xw- */
#define __S111	PAGE_SHARED	/* shared  xwr */
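/*
 * Illustrative note (not part of the original header): a private writable
 * mapping, e.g. mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0),
 * selects __P011 == PAGE_COPY above, so its PTEs start out without _PAGE_RW;
 * the first write faults, and the generic copy-on-write path in mm/memory.c
 * installs a private copy made writable and dirty via pte_mkwrite() and
 * pte_mkdirty() (defined below).
 */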
#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd)	((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd))

/*
 * The following only work if pte_present() is true.
 */
#define pte_none(pte)		(!(pte_val(pte) ^ _PAGE_USER))
#define pte_present(pte)	(pte_val(pte) & _PAGE_VALID)
#define pte_clear(mm,addr,ptep) \
	do { update_pte(ptep, __pte(_PAGE_USER)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PAGE_MASK)
#define pmd_clear(pmdp)		do { set_pmd(pmdp, __pmd(0)); } while (0)
#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
/* Note: We use the _PAGE_USER bit to indicate write-protected kernel memory */

static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; }
static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte)    { pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) |= _PAGE_RW; return pte; }
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
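/*
 * Illustrative sketch (not part of the original header): pte_modify() is what
 * protection changes such as mprotect() end up using to swap protections while
 * keeping the physical page and its accessed/dirty state, roughly:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK preserves the PPN (PAGE_MASK), _PAGE_ACCESSED, _PAGE_DIRTY
 * and the cache attribute bits; everything else comes from the new pgprot.
 */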
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	/* Write the cache line holding the PTE back to memory (memw, dhwb,
	 * dsync), so the update is visible through aliased mappings of the
	 * page table when the write-back data cache can alias. */
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
#endif
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}
static inline int
ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	update_pte(ptep, pte_mkclean(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))
#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,address)	((pmd_t*)(dir))

/* Find an entry in the third-level page table.. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
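/*
 * Illustrative usage sketch (not part of the original header): looking up the
 * PTE behind a kernel virtual address with the helpers above, assuming the
 * mapping has already been set up:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// folded: just casts the pgd slot
 *	unsigned long pfn = 0;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_kernel(pmd, addr);
 *		if (pte_present(*pte))
 *			pfn = pte_pfn(*pte);
 *	}
 */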
/*
 * Encode and decode a swap entry.
 * Each PTE in a process VM's page table is either:
 *   "present" -- valid and not swapped out, protection bits are meaningful;
 *   "not present" -- which further subdivides in these two cases:
 *     "none" -- no mapping at all; identified by pte_none(), set by pte_clear()
 *     "swapped out" -- the page is swapped out, and the SWP macros below
 *                      are used to store swap file info in the PTE itself.
 *
 * In the Xtensa processor MMU, any PTE entries in user space (or anywhere
 * in virtual memory that can map differently across address spaces)
 * must have a correct ring value that represents the RASID field that
 * is changed when switching address spaces. E.g. such PTE entries cannot
 * be set to ring zero, because that can cause a (global) kernel ASID
 * entry to be created in the TLBs (even with an invalid cache attribute),
 * potentially causing a multihit exception when going back to another
 * address space that mapped the same virtual address at another ring.
 *
 * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs.
 * We also avoid using the _PAGE_VALID bit, which must be zero for non-present
 * pages.
 *
 * We end up with the following available bits: 1..3 and 7..31.
 * We don't bother with 1..3 for now (we can use them later if needed),
 * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
 * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
 * is currently implemented as an index into swap_info[MAX_SWAPFILES]
 * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
 * However, for some reason all other architectures in the 2.4 kernel
 * reserve either 6, 7, or 8 bits, so I'll not detract from that for now. :)
 * SWP_OFFSET is an offset into the swap file in page-size units, so
 * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
 *
 * FIXME: 2 GB isn't very big. Other bits can be used to allow
 * larger swap sizes. In the meantime, it appears relatively easy to get
 * around the 2 GB limitation by simply using multiple swap files.
 */
#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 13)
#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
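/*
 * Illustrative sketch (not part of the original header): encoding a page that
 * lives at offset 0x1234 in swap area 3 with the macros above gives
 *
 *	__swp_entry(3, 0x1234).val = (3 << 7) | (0x1234 << 13) = 0x02468180
 *
 * Bits 0..6 stay clear, so _PAGE_VALID and the ring bits are zero and
 * pte_present() is false for the resulting PTE; __swp_type() and
 * __swp_offset() recover 3 and 0x1234 respectively.
 */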
#endif /* !defined (__ASSEMBLY__) */

#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 *                _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 *                _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
 *                _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
 *
 * Note: We require an additional temporary register which can be the same as
 *       the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 */
#define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd
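/*
 * Illustrative expansion (not part of the original header): with the mm
 * pointer in a2, the faulting address in a3 and a4 as scratch (arbitrary
 * register choices), _PGD_OFFSET(a2, a3, a4) expands to
 *
 *	l32i	a2, a2, MM_PGD			# a2 = mm->pgd
 *	extui	a4, a3, PGDIR_SHIFT, 32-PGDIR_SHIFT
 *	addx4	a2, a4, a2			# a2 = &pgd[pgd_index(a3)]
 *
 * leaving the address of the PGD entry in a2.
 */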
#else

extern void paging_init(void);

#define kern_addr_valid(addr)	(1)

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

/* No page table caches to init */
#define pgtable_cache_init()	do { } while (0)

typedef pte_t *pte_addr_t;

#endif /* !defined (__ASSEMBLY__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */