#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_

#ifndef CONFIG_PPC64
#include <asm-ppc/mmu.h>
#else

/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */
/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
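
/*
 * Illustrative sketch (not compiled): how the bits above combine into
 * the two doublewords handed to the slbmte instruction for a bolted
 * 256MB kernel segment.  The helper name and the "index" parameter are
 * hypothetical; see slb_initialize()/slb_allocate for the real users.
 */
#if 0
static inline void slbmte_example(unsigned long ea, unsigned long vsid,
				  unsigned long index)
{
	/* VSID word: scrambled VSID, plus protection/class flags */
	unsigned long vsid_data = (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
	/* ESID word: effective segment id, valid bit, and SLB slot */
	unsigned long esid_data = (ea & ~0xfffffffUL) | SLB_ESID_V | index;

	asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");
}
#endif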
/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;
/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
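
/*
 * For illustration, the 16MB entry of the mmu_psize_defs[] table
 * (populated by the MMU setup code) might look like the sketch below.
 * The exact values are determined at boot from the firmware device
 * tree, so treat these numbers as examples only.
 */
#if 0
static const struct mmu_psize_def example_16M_psize = {
	.shift	= 24,		/* 16MB = 1 << 24 */
	.penc	= 0,		/* example LP encoding for a 16M HPTE */
	.tlbiel	= 0,		/* assume no tlbiel for this size */
	.avpnm	= 0x1UL,	/* AVPN bits below the page boundary */
	.sllp	= SLB_VSID_L,	/* SLB L=1, LP=00 selects 16M pages */
};
#endif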
#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index in the page sizes array.
 * The use of fixed constants for this purpose is better for the
 * performance of the low level hash refill handlers.
 *
 * An unsupported page size has a "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K	0	/* 4K */
#define MMU_PAGE_64K	1	/* 64K */
#define MMU_PAGE_64K_AP	2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M	3	/* 1M */
#define MMU_PAGE_16M	4	/* 16M */
#define MMU_PAGE_16G	5	/* 16G */

#define MMU_PAGE_COUNT	6
#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}
/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
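
/*
 * A sketch (not compiled) of how the two helpers above combine into a
 * complete HPTE, in the style of the hpte_insert() backends declared
 * below.  The helper name is hypothetical and the protection/valid
 * flags are example choices only.
 */
#if 0
static inline hpte_t hpte_example(unsigned long va, unsigned long pa,
				  int psize)
{
	hpte_t hpte;

	/* High word: AVPN + L bit, marked valid */
	hpte.v = hpte_encode_v(va, psize) | HPTE_V_VALID;
	/* Low word: RPN (or LP encoding), plus example PP bits */
	hpte.r = hpte_encode_r(pa, psize) | PP_RWRX;
	return hpte;
}
#endif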
/*
 * This hashes a virtual address; for now it only handles 256MB segments.
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);

extern void htab_finish_init(void);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);

extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void mm_init_ppc64(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long rflags,
				     unsigned long vflags, int psize);
extern long native_hpte_insert(unsigned long hpte_group,
			       unsigned long va, unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags, int psize);
extern long iSeries_hpte_insert(unsigned long hpte_group,
				unsigned long va, unsigned long prpn,
				unsigned long rflags,
				unsigned long vflags, int psize);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void stab_initialize(unsigned long stab);

#endif /* __ASSEMBLY__ */
/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID; for user addresses it is:
 *	(context << 16) | (esid & 0xffff)
 * (matching USER_ESID_BITS and CONTEXT_BITS below).
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139, a 28-bit prime
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved. VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses, i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results). The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
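
/*
 * A minimal sketch (not compiled, helper name hypothetical) of the
 * 2^n-1 trick referred to above: because 2^n == 1 (mod 2^n - 1),
 * "x mod (2^n - 1)" can be computed by folding the high bits of x
 * into the low n bits instead of dividing.  vsid_scramble() below
 * and ASM_VSID_SCRAMBLE use exactly this folding, with n = VSID_BITS.
 */
#if 0
static unsigned long mod_2n_minus_1(unsigned long x, unsigned int n)
{
	unsigned long mask = (1UL << n) - 1;

	/* Each fold strictly decreases x while preserving the residue */
	while (x > mask)
		x = (x >> n) + (x & mask);
	/* x == mask means x == 2^n - 1 == 0 (mod 2^n - 1) */
	return (x == mask) ? 0 : x;
}
#endif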
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */
#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;
static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}
/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^41) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY */

#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_MMU_H_ */