pgtable-ppc32.h

#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout, where the pgdir is 8KB and the MS 11 bits
 * are an index into the second-level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
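/*
 * Worked example (assuming the common configuration of 4KB pages with
 * 32-bit PTEs, where PTE_SHIFT comes out to 10):
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22, so one pgd entry maps 4MB
 *	PTRS_PER_PTE = 1 << 10 = 1024 PTEs per one-page PTE table
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries, i.e. a 4KB pgdir
 *
 * With 64-bit PTEs, PTE_SHIFT drops to 9, which yields the 2048-entry
 * 8KB pgdir and 512-entry PTE pages described in the comment above.
 */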
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others; it is the point from which we can start
 * laying out the kernel virtual space that goes below PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif
/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
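/*
 * Example values (hypothetical; the consistent-pool size is a Kconfig
 * choice): without CONFIG_HIGHMEM, KVIRT_TOP is 0xfe000000.  On a
 * non-cache-coherent board reserving a 16MB consistent pool, that gives
 *
 *	IOREMAP_TOP = (0xfe000000 - 0x01000000) & PAGE_MASK = 0xfd000000
 */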
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
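/*
 * Worked example: with 768MB of lowmem mapped at 0xc0000000,
 * high_memory = 0xf0000000, so
 *
 *	VMALLOC_START = (0xf0000000 + 0x1000000) & ~0xffffff = 0xf1000000
 *
 * leaving a 16MB guard hole between lowmem and the vmalloc area.
 */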
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>
#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);
/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
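/*
 * Usage sketch (hypothetical caller): atomically clear the write-enable
 * bits of a live PTE while fetching its old value, e.g.
 *
 *	unsigned long old = pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);
 *
 * Bits in 'clr' are removed and bits in 'set' are ORed in within one
 * lwarx/stwcx. sequence, so a concurrent hash/TLB miss on another CPU
 * never observes a half-updated entry.
 */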
/*
 * 2.6 calls this without flushing the TLB entry; that is wrong for our
 * hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
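/*
 * Sketch (hypothetical caller, e.g. page aging in reclaim):
 *
 *	int young = ptep_test_and_clear_young(vma, addr, ptep);
 *
 * Besides clearing _PAGE_ACCESSED, the hash PTE (if one exists) is
 * flushed; otherwise the existing HPTE would keep satisfying
 * translations without faulting and _PAGE_ACCESSED would never be
 * set again.
 */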
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
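/*
 * Note the clear mask is ~_PAGE_HASHPTE: every bit except _PAGE_HASHPTE
 * is cleared, so a later hash-table flush can still tell that an HPTE
 * for this page may exist.  Sketch (hypothetical caller):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(page);
 */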
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}
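/*
 * The two helpers above pair up in the copy-on-write life cycle
 * (sketch, hypothetical callers): fork() write-protects the parent's
 * PTEs with
 *
 *	ptep_set_wrprotect(src_mm, addr, ptep);
 *
 * and the later write fault restores access by ORing the
 * dirty/accessed/RW/exec bits of the newly computed PTE value back in:
 *
 *	__ptep_set_access_flags(ptep, entry);
 */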
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
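/*
 * Walk sketch (hypothetical helper; the pud and pmd levels are folded
 * away by <asm-generic/pgtable-nopmd.h>, so the intermediate steps are
 * no-ops that just preserve the generic four-level form):
 *
 *	static pte_t *lookup_kernel_pte(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */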
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
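/*
 * Worked example: swap type 3, offset 0x1234:
 *
 *	entry.val = 3 | (0x1234 << 5) = 0x24683
 *	pte_val   = 0x24683 << 3      = 0x123418
 *
 * The low three PTE bits stay clear, so (per the note above) a swap
 * entry can never be mistaken for a present, file, or hashed PTE.
 */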
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
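/*
 * Sketch: file page 0x42 of a nonlinear mapping is stored as
 *
 *	pte_val = (0x42 << 3) | _PAGE_FILE
 *
 * PTE_FILE_MAX_BITS is 29 because the offset occupies bits 3..31;
 * _PAGE_FILE must therefore live in the low three bits, where the
 * >> 3 in pte_to_pgoff() strips it off again.
 */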
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */