#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT 18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT 16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT 14
#else
#define PAGE_SHIFT 12
#endif

#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
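/*
 * For example, with CONFIG_PPC_64K_PAGES: PAGE_SHIFT = 16 and
 * PAGE_SIZE = ASM_CONST(1) << 16 = 0x10000 (64K). With the default
 * 4K pages: PAGE_SHIFT = 12 and PAGE_SIZE = 0x1000.
 */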
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif
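/*
 * Illustrative: with the classic 16M huge pages on a 4K-base-page
 * configuration, HPAGE_SHIFT = 24, so HPAGE_SIZE = 0x1000000 and
 * HUGETLB_PAGE_ORDER = 24 - 12 = 12.
 */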
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
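/*
 * For example, with PAGE_SHIFT = 12: (1 << 12) - 1 = 0xfff, so PAGE_MASK
 * is the int ~0xfff = 0xfffff000. Assigned to a 64-bit quantity it
 * sign-extends to 0xfffffffffffff000, masking the low 12 bits as intended.
 */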
/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32 and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two equivalent ways to relate a virtual address to its
 * physical address:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
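/*
 * Illustrative numbers (not from any particular config): a kdump kernel
 * might have KERNELBASE = 0xc2000000 while PAGE_OFFSET = 0xc0000000 and
 * PHYSICAL_START = 0x2000000 (32M). The linear-map equation then gives
 * MEMORY_START = PHYSICAL_START + PAGE_OFFSET - KERNELBASE = 0, and both
 * translations agree:
 * va = pa + 0xc0000000 - 0 = pa + 0xc2000000 - 0x2000000
 */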
#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#ifdef CONFIG_RELOCATABLE_PPC32
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START kernstart_addr
#else /* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START memstart_addr
#else
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
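/*
 * Illustrative: with MEMORY_START = 0x4000000 (64M) and 4K pages,
 * ARCH_PFN_OFFSET = 0x4000000 >> 12 = 0x4000, so pfn_valid() accepts
 * pfns from 0x4000 up to (but not including) max_mapnr.
 */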
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then. However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
 *
 * With RELOCATABLE_PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *   virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *                  MODULO(_stext.run,256M)
 *
 * and create the following mapping:
 *
 *   ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *   __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *   PHYSICAL_START = kernstart_addr = Physical address of _stext
 *   KERNELBASE = Compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we need to also account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 * PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000, and
 * kernstart_addr = 64M.
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *               = 0xbc100000, which is wrong.
 *
 * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *   __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *   PHYSICAL_START = dynamic load address (kernstart_addr variable)
 *   Effective KERNELBASE = virtual_base
 *                        = ALIGN_DOWN(KERNELBASE,256M) +
 *                          MODULO(PHYSICAL_START,256M)
 *
 * To make the cost of __va() / __pa() more lightweight, we introduce
 * a new variable virt_phys_offset, which will hold:
 *
 *   virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *                    = ALIGN_DOWN(KERNELBASE,256M) -
 *                      ALIGN_DOWN(PHYSICAL_START,256M)
 *
 * Hence:
 *
 *   __va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *           = x + virt_phys_offset
 *
 * and
 *
 *   __pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *           = x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#ifdef CONFIG_BOOKE
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
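/*
 * Continuing the example above (kernel loaded at 64M, KERNELBASE
 * 0xc0000000):
 *
 *   virt_phys_offset = ALIGN_DOWN(0xc0000000,256M) -
 *                      ALIGN_DOWN(0x4000000,256M)
 *                    = 0xc0000000 - 0 = 0xc0000000
 *
 * so __va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000 and
 * __pa(0xc0100000) = 0xc0100000 - 0xc0000000 = 0x100000, as required
 * by the mapping described above.
 */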
/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
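/*
 * For example, with size = 0x1000: _ALIGN_UP(0x1234, 0x1000) = 0x2000,
 * _ALIGN_DOWN(0x1234, 0x1000) = 0x1000, and an already-aligned address
 * is returned unchanged by both.
 */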
/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#endif
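/*
 * E.g. on ppc32 with PAGE_OFFSET 0xc0000000, is_kernel_addr(0xc0100000)
 * is true while is_kernel_addr(0x10000000) (a typical user address) is
 * false; on 64-bit Book3E the test is simply the top address bit.
 */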
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages. This works because we know that
 * the page tables live in kernel space. If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size. This masks those bits.
 */
#define HUGEPD_SHIFT_MASK 0x3f
#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) })

/*
 * 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif

#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define pgd_val(x) (x)
#define pgprot_val(x) (x)

typedef unsigned long pgprot_t;
#define __pgd(x) (x)
#define __pgprot(x) (x)

#endif
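/*
 * Either way, code manipulating PTEs is expected to go through the
 * accessors, e.g. (sketch):
 *
 *   pte_t pte = __pte(pte_val(old) | _PAGE_DIRTY);
 *
 * which compiles to the same thing whether pte_t is a struct
 * (STRICT_MM_TYPECHECKS) or a bare pte_basic_t, while a raw
 * "old | _PAGE_DIRTY" would only build in the latter case.
 */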
typedef struct { signed long pd; } hugepd_t;

#ifdef CONFIG_HUGETLB_PAGE
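/*
 * Why a sign test works (see PD_HUGE above): a normal page-table pointer
 * is a kernel virtual address and so has the top (sign) bit set, making
 * pd negative, while the hugetlb setup code stores hugepd entries with
 * the top bit cleared, so a strictly positive pd identifies a hugepage
 * directory.
 */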
static inline int hugepd_ok(hugepd_t hpd)
{
        return (hpd.pd > 0);
}

#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))

#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep) 0
#endif /* CONFIG_HUGETLB_PAGE */
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
                           struct page *p);
extern int page_is_ram(unsigned long pfn);

extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

typedef struct page *pgtable_t;

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PAGE_H */