/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, and that can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits.
 *   This is a must for the 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in the TLB Miss handlers
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from an 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have a different value in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry),
 *      while from the PTE perspective they are 8 and 9 respectively.
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_U_EXECUTE     (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_U_WRITE       (1<<4)	/* Page has user write perm (H) */
#define _PAGE_U_READ        (1<<5)	/* Page has user read perm (H) */
#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
#define _PAGE_K_READ        (1<<8)	/* Page has kernel read perm (H) */
#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<10)	/* page cache/swap (S) */
#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */

#else

/* PD1 */
#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_U_EXECUTE     (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_U_WRITE       (1<<2)	/* Page has user write perm (H) */
#define _PAGE_U_READ        (1<<3)	/* Page has user read perm (H) */
#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
#define _PAGE_K_READ        (1<<6)	/* Page has kernel read perm (H) */
#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */

/* PD0 */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr,
					   usable for shared TLB entries (H) */
#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<12)	/* page cache/swap (S) */

#define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */

#endif

/* Kernel allowed all permissions for all pages */
#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE (0)
#endif

/* Helper for every "user" page
 *  -kernel can R/W/X
 *  -cached by default, unless configured otherwise
 *  -present in memory
 */
#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

#define _PAGE_READ	(_PAGE_U_READ | _PAGE_K_READ)
#define _PAGE_WRITE	(_PAGE_U_WRITE | _PAGE_K_WRITE)
#define _PAGE_EXECUTE	(_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While kernel runs out of untranslated space, vmalloc/modules use a chunk
 * of kernel vaddr space - visible in all addr spaces, but kernel mode only.
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL	__pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *      which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping:
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 *
 * (a usage sketch follows the table below)
 */

	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R

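/*
 * Illustrative sketch (an addition, not part of the upstream file): the
 * generic VM copies the __Pxxx/__Sxxx macros above into protection_map[],
 * and mm code resolves a vma's base protection roughly as:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * So a private read+exec mapping (VM_READ|VM_EXEC, !VM_SHARED) picks
 * __P101 == PAGE_U_X_R, the 1:1 case noted above, while a private writable
 * one gets a read-only PTE until the COW fault upgrades it.
 */
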
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 * (a worked example follows the sizing macros below)
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef __ASSEMBLY__
#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
#else
#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
#endif

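/*
 * Worked example (a sketch for the 8K page size, i.e. the 11:8:13 split
 * from the changelog above): PAGE_SHIFT = 13 and BITS_FOR_PTE = 8, so
 * BITS_FOR_PGD = 32 - 8 - 13 = 11. Hence PGDIR_SHIFT = 21, each PGD entry
 * spans PGDIR_SIZE = 2 MB of vaddr, PTRS_PER_PGD = 2048 and
 * PTRS_PER_PTE = 256 (a 1K page table, per the memset note above).
 */
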
/*
 * Number of entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0

/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define	pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

/* TBD: Non linear mapping stuff */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS	30
#define pgoff_to_pte(x)		__pte(x)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) + \
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)

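/*
 * Illustrative two-level walk (a sketch; with <asm-generic/pgtable-nopmd.h>
 * the pud/pmd levels are compile-time folds of the pgd):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// folded, no extra load
 *	pmd_t *pmd = pmd_offset(pud, addr);	// folded, no extra load
 *	pte_t *pte = NULL;
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset(pmd, addr);
 */
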
/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

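/*
 * e.g. (a sketch) an mprotect()-style transition: pte_modify(pte, PAGE_U_R)
 * keeps the pfn (PAGE_MASK bits) plus the soft accessed/dirty state, and
 * swaps only the protection bits for those of PAGE_U_R.
 */
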
/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in a MMU register.
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register.
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task.
 * Thus use this macro only when you are certain that "current" is current,
 * e.g. when dealing with signal frame setup code etc. (usage sketch below)
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)				\
({								\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
	pgd_base + pgd_index(addr);				\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

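/*
 * Intended use (a sketch): in a path where "current" is known to own @mm,
 * e.g. signal delivery,
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 *
 * For an arbitrary mm (ptrace, get_user_pages on another task), stick to
 * plain pgd_offset(mm, addr).
 */
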
extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding a swap
 * "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

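/*
 * Worked example (hypothetical values): __swp_entry(3, 0x100) yields
 * val = (0x100 << 13) | 3 = 0x00200003. Bits 5-12 stay zero, so the entry
 * is neither _PAGE_PRESENT nor _PAGE_FILE under either MMU layout, and
 * decoding recovers __swp_type() == 3, __swp_offset() == 0x100.
 */
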
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma, from, pfn, size, prot) \
			remap_pfn_range(vma, from, pfn, size, prot)

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif