pgtable-ppc32.h

#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>		/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */
/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
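/*
 * Worked example (illustrative, not part of the original header):
 * with the usual ppc32 configuration of 4KB pages (PAGE_SHIFT = 12)
 * and 32-bit PTEs, PTE_SHIFT is 10 (1024 4-byte PTEs fill one page), so
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22
 *	PGDIR_SIZE   = 1 << 22 = 4MB mapped per pgdir entry
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries in a one-page pgdir
 *
 * With CONFIG_PTE_64BIT (8-byte PTEs), PTE_SHIFT drops to 9, giving
 * 512 PTEs per page, a 2MB reach per pgdir entry, and 2048 pgdir
 * entries -- the 8KB pgdir described in the layout comment above.
 */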
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
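/*
 * Worked example (illustrative, not part of the original header):
 * VMALLOC_START adds the offset and then rounds down to a 16MB
 * boundary.  If high_memory were 0xc8000000, then
 * (0xc8000000 + 0x1000000) & ~0x00ffffff = 0xc9000000, a full 16MB
 * hole; a high_memory that is not 16MB-aligned gets a correspondingly
 * smaller hole.
 */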
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* If _PAGE_SPECIAL is defined, then we advertise our support for it */
#ifdef _PAGE_SPECIAL
#define __HAVE_ARCH_PTE_SPECIAL
#endif
/*
 * Some bits are only used on some cpu families...  Make sure that all
 * the undefined ones get defined as 0.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PAGE_ENDIAN
#define _PAGE_ENDIAN	0
#endif
#ifndef _PAGE_COHERENT
#define _PAGE_COHERENT	0
#endif
#ifndef _PAGE_WRITETHRU
#define _PAGE_WRITETHRU	0
#endif
#ifndef _PAGE_SPECIAL
#define _PAGE_SPECIAL	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
/* Location of the PFN in the PTE.  Most platforms use the same as PAGE_SHIFT
 * here (ie, naturally aligned).  Platforms that don't just pre-define the
 * value themselves, so we don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
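/*
 * Worked example (illustrative, not part of the original header):
 * with PTE_RPN_SHIFT = 12 and 64-bit PTEs, the physical page number
 * occupies PTE bits 12..63 and PTE_RPN_MASK = ~0xfffULL, so a PTE
 * value of 0x000000012345f025 maps physical page 0x12345f (a >32-bit
 * physical address) and keeps the low 12 bits for protection/status
 * flags.
 */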
/* _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Mask of bits returned by pte_pgprot() */
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
			 _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
			 _PAGE_EXEC | _PAGE_HWEXEC)
/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages.  We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif

#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */
#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
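/*
 * Example (illustrative, not part of the original header): building a
 * kernel mapping for page frame 0x1234 would be
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);
 *
 * which yields pte_val(pte) == (0x1234 << PTE_RPN_SHIFT) | _PAGE_RAM,
 * and pte_pfn(pte) recovers 0x1234 by shifting the flag bits back out.
 */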
#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }

static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);
/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
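/*
 * Usage sketch (illustrative, not part of the original header):
 * pte_update() is a fetch-and-modify primitive, computing
 * new = (old & ~clr) | set and returning the previous value, e.g.
 *
 *	old = pte_update(ptep, _PAGE_DIRTY, 0);	// clear the dirty bit
 *	if (old & _PAGE_DIRTY)
 *		... the page had been written to ...
 *
 * The helpers below (ptep_get_and_clear(), ptep_set_wrprotect(), ...)
 * are all thin wrappers around it.
 */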
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
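/*
 * Note (illustrative, not part of the original header): the hash flush
 * above is what makes page aging work on hash MMUs.  A still-valid
 * HPTE would let the hardware keep servicing accesses without ever
 * faulting, so _PAGE_ACCESSED would never be set again; invalidating
 * the hash entry forces the next access back through the hash-miss
 * path, which re-establishes the bit.
 */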
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}
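/*
 * Note (illustrative, not part of the original header):
 * __ptep_set_access_flags() only ORs bits in and never clears any, so
 * it is suitable for the "relax protections on fault" path (e.g.
 * marking a PTE dirty and accessed after a write fault); any TLB
 * flushing is expected to be handled by the ptep_set_access_flags()
 * wrapper that calls it.
 */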
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
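/*
 * Usage sketch (illustrative, not part of the original header): a
 * typical walk from a user address down to its PTE, assuming the
 * caller holds the appropriate page table lock:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// folded level, no-op
 *	pmd_t *pmd = pmd_offset(pud, addr);	// folded via pgtable-nopmd.h
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_map(pmd, addr);
 *		pte_t val = *pte;
 *		pte_unmap(pte);			// drop the kmap_atomic mapping
 *	}
 *
 * pte_offset_map() is needed (rather than pte_offset_kernel())
 * whenever the PTE page may not be permanently mapped in lowmem.
 */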
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
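/*
 * Worked example (illustrative, not part of the original header):
 * a swap entry of type 2 at offset 0x100 encodes as
 *
 *	__swp_entry(2, 0x100).val = 2 | (0x100 << 5) = 0x2002
 *	__swp_entry_to_pte(...)   = 0x2002 << 3      = 0x10010
 *
 * The << 3 keeps PTE bits 0-2 clear, so a swap entry can never be
 * mistaken for a present, file, or hashed PTE (on hash32 those flags
 * live in the low bits).
 */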
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
  459. #endif /* _ASM_POWERPC_PGTABLE_PPC32_H */