
/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/ppc_asm.h> /* for ASM_CONST */
#include <asm/page.h>
/*
 * Segment table
 */

#define STE_ESID_V      0x80
#define STE_ESID_KS     0x20
#define STE_ESID_KP     0x10
#define STE_ESID_N      0x08

#define STE_VSID_SHIFT  12

/* Location of cpu0's segment table */
#define STAB0_PAGE      0x6
#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY__ */
/*
 * SLB
 */

#define SLB_NUM_BOLTED          3
#define SLB_CACHE_ENTRIES       8

/* Bits in the SLB ESID word */
#define SLB_ESID_V              ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT          12
#define SLB_VSID_KS             ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP             ASM_CONST(0x0000000000000400)
#define SLB_VSID_N              ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L              ASM_CONST(0x0000000000000100) /* largepage */
#define SLB_VSID_C              ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LS             ASM_CONST(0x0000000000000070) /* size of largepage */

#define SLB_VSID_KERNEL         (SLB_VSID_KP)
#define SLB_VSID_USER           (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C                 (0x08000000)
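
/*
 * Note (not in the original header): in outline, an SLB entry is
 * installed with the slbmte instruction, whose VSID operand is built
 * as (vsid << SLB_VSID_SHIFT) | flags (e.g. SLB_VSID_KERNEL or
 * SLB_VSID_USER) and whose ESID operand combines the ESID with
 * SLB_ESID_V and the entry index; see slb_allocate in slb_low.S for
 * the authoritative asm.
 */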
/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_AVPN_SHIFT       7
#define HPTE_V_AVPN             ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED           ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK             ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE            ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY        ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID            ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0              ASM_CONST(0x8000000000000000)
#define HPTE_R_TS               ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT        12
#define HPTE_R_RPN              ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS            ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP               ASM_CONST(0x0000000000000003)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX 0       /* Supervisor read/write, User none */
#define PP_RWRX 1       /* Supervisor read/write, User read */
#define PP_RWRW 2       /* Supervisor read/write, User read/write */
#define PP_RXRX 3       /* Supervisor read,       User read */
#ifndef __ASSEMBLY__

typedef struct {
        unsigned long v;        /* first doubleword of the HPTE */
        unsigned long r;        /* second doubleword of the HPTE */
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_hash_mask;

static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
        unsigned long vsid;
        unsigned long page;

        if (large) {
                vsid = vpn >> 4;
                page = vpn & 0xf;
        } else {
                vsid = vpn >> 16;
                page = vpn & 0xffff;
        }

        return (vsid & 0x7fffffffffUL) ^ page;
}
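
/*
 * Illustrative sketch, not part of the original header: callers such
 * as the hash-fault path typically reduce the hash modulo the table
 * size and scale by the group size to find the first slot of the
 * primary PTE group.  example_hpt_primary_slot is a hypothetical
 * name used only for this example.
 */
static inline unsigned long example_hpt_primary_slot(unsigned long vpn,
                                                     int large)
{
        /* mask down to the number of PTE groups, then scale by 8 */
        return (hpt_hash(vpn, large) & htab_hash_mask) * HPTES_PER_GROUP;
}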
static inline void __tlbie(unsigned long va, int large)
{
        /* clear top 16 bits, non SLS segment */
        va &= ~(0xffffULL << 48);

        if (large) {
                va &= HPAGE_MASK;
                asm volatile("tlbie %0,1" : : "r"(va) : "memory");
        } else {
                va &= PAGE_MASK;
                asm volatile("tlbie %0,0" : : "r"(va) : "memory");
        }
}

static inline void tlbie(unsigned long va, int large)
{
        asm volatile("ptesync": : :"memory");
        __tlbie(va, large);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void __tlbiel(unsigned long va)
{
        /* clear top 16 bits, non SLS segment */
        va &= ~(0xffffULL << 48);
        va &= PAGE_MASK;

        /*
         * Thanks to Alan Modra we are now able to use machine specific
         * assembly instructions (like tlbiel) by using the gas -many flag.
         * However we have to support older toolchains so for the moment
         * we hardwire it.
         */
#if 0
        asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
        asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}

static inline void tlbiel(unsigned long va)
{
        asm volatile("ptesync": : :"memory");
        __tlbiel(va);
        asm volatile("ptesync": : :"memory");
}
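
/*
 * Note: tlbie is the broadcast form and invalidates the translation
 * on all processors, so the global sequence above needs the
 * eieio/tlbsync/ptesync tail to wait for remote completion.  tlbiel
 * only affects the local processor, which is why the cheaper
 * ptesync-only bracketing suffices here.
 */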
static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
{
        unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
        unsigned long va;

        va = avpn << 23;

        if (!(hpte_v & HPTE_V_LARGE)) {
                unsigned long vpi, pteg;

                pteg = slot / HPTES_PER_GROUP;
                if (hpte_v & HPTE_V_SECONDARY)
                        pteg = ~pteg;

                vpi = ((va >> 28) ^ pteg) & htab_hash_mask;

                va |= vpi << PAGE_SHIFT;
        }

        return va;
}
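
/*
 * Note: slot2va() inverts hpt_hash().  The AVPN in the HPTE only
 * covers va bits above bit 23, so for 4K pages the missing virtual
 * page index (vpi) is recovered from the slot: the PTE group number
 * encodes the hash (complemented for secondary-hash entries), and
 * since hash == (vsid ^ vpi) & htab_hash_mask, xoring the group
 * number with the low VSID bits (va >> 28) yields vpi.
 */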
/*
 * Handle a fault by adding an HPTE.  If the address can't be
 * determined to be valid via Linux page tables, return 1.  If
 * handled return 0.
 */
extern int __hash_page(unsigned long ea, unsigned long access,
                       unsigned long vsid, pte_t *ptep, unsigned long trap,
                       int local);

extern void htab_finish_init(void);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
                                     unsigned long va, unsigned long prpn,
                                     unsigned long vflags,
                                     unsigned long rflags);
extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
                               unsigned long prpn,
                               unsigned long vflags, unsigned long rflags);

extern void stabs_alloc(void);

#endif /* __ASSEMBLY__ */
/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *
 *      (context << 16) | (esid & 0xffff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *      VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *      where   VSID_MULTIPLIER = 200730139 = 0xBF6E61B (a 28-bit prime)
 *              VSID_MODULUS    = 2^36-1    = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for 16 significant bits of ESID and 19 bits of context
 * for user addresses, i.e. 16T (44 bits) of address space for up to
 * 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 * - The scramble function gives robust scattering in the hash table
 * (at least based on some initial results).  The previous method was
 * more susceptible to pathological cases giving excessive hash
 * collisions.
 */
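
/*
 * Note on the reduction trick used below: because 2^36 == 1
 * (mod 2^36-1), a 64-bit product x can be reduced as
 *
 *      x % (2^36-1) == ((x >> 36) + (x & (2^36-1))) % (2^36-1)
 *
 * so one shift-and-add folds the product down to at most 37 bits and
 * a single conditional fold of the carry finishes the job.  Both
 * ASM_VSID_SCRAMBLE and vsid_scramble() below rely on this identity.
 */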
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER ASM_CONST(200730139)    /* 28-bit prime */
#define VSID_BITS       36
#define VSID_MODULUS    ((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS    19
#define USER_ESID_BITS  16

#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
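
/*
 * Note: with USER_ESID_BITS == 16 and SID_SHIFT == 28 (from
 * asm/page.h), USER_VSID_RANGE is 2^44 == 16TB, the span of virtual
 * address space covered by the VSIDs of a single context.
 */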
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *      rt = register containing the proto-VSID and into which the
 *              VSID will be stored
 *      rx = scratch register (clobbered)
 *
 *      - rt and rx must be different registers
 *      - The answer will end up in the low 36 bits of rt.  The higher
 *        bits may contain other garbage, so you may need to mask the
 *        result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)       \
        lis     rx,VSID_MULTIPLIER@h;                                   \
        ori     rx,rx,VSID_MULTIPLIER@l;                                \
        mulld   rt,rt,rx;               /* rt = rt * MULTIPLIER */      \
                                                                        \
        srdi    rx,rt,VSID_BITS;                                        \
        clrldi  rt,rt,(64-VSID_BITS);                                   \
        add     rt,rt,rx;               /* add high and low bits */     \
        /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and        \
         * 2^36-1+2^28-1.  That in particular means that if r3 >=      \
         * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has    \
         * the bit clear, r3 already has the answer we want, if it     \
         * doesn't, the answer is the low 36 bits of r3+1.  So in all  \
         * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
        addi    rx,rt,1;                                                \
        srdi    rx,rx,VSID_BITS;        /* extract 2^36 bit */          \
        add     rt,rt,rx
#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
        mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
        u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
        /* The code below is equivalent to this function for arguments
         * < 2^VSID_BITS, which is all this should ever be called
         * with.  However gcc is not clever enough to compute the
         * modulus (2^n-1) without a second multiply. */
        return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
        unsigned long x;

        x = protovsid * VSID_MULTIPLIER;
        x = (x >> VSID_BITS) + (x & VSID_MODULUS);
        return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}
/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
        return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
        return vsid_scramble((context << USER_ESID_BITS)
                             | (ea >> SID_SHIFT));
}
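
/*
 * Illustrative sketch, not part of the original header: the 64-bit
 * "va" handed to tlbie()/hpt_hash() is formed from the scrambled VSID
 * and the offset within the 256MB segment.  example_mk_va is a
 * hypothetical name used only for this example.
 */
static inline unsigned long example_mk_va(unsigned long vsid, unsigned long ea)
{
        /* SID_SHIFT (28) bits of segment offset below the VSID */
        return (vsid << SID_SHIFT) | (ea & ((1UL << SID_SHIFT) - 1));
}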
#define VSID_SCRAMBLE(pvsid)    (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)         VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_MMU_H_ */