#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V 0x80
#define STE_ESID_KS 0x20
#define STE_ESID_KP 0x10
#define STE_ESID_N 0x08

#define STE_VSID_SHIFT 12

/* Location of cpu0's segment table */
#define STAB0_PAGE 0x6
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
#define SLB_MIN_SIZE 32

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12
#define SLB_VSID_SHIFT_1T 24
#define SLB_VSID_SSIZE_SHIFT 62
#define SLB_VSID_B ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)
#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C (0x08000000)
#define SLBIE_SSIZE_SHIFT 25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)

#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */

#ifndef __ASSEMBLY__
struct hash_pte {
	unsigned long v;	/* first doubleword: AVPN, ssize, valid, ... */
	unsigned long r;	/* second doubleword: RPN, protection, R/C */
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int shift;	/* number of bits */
	unsigned int penc;	/* HPTE encoding */
	unsigned int tlbiel;	/* tlbiel supported for that page size */
	unsigned long avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
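
/*
 * For example, the 16M page size is typically described roughly as
 * below (a sketch based on the kernel's default page-size tables;
 * on real systems the exact values come from the device tree):
 *
 *	[MMU_PAGE_16M] = {
 *		.shift	= 24,		// 16M = 2^24 bytes
 *		.sllp	= SLB_VSID_L,	// L=1, LP=00 in the SLB entry
 *		.penc	= 0,		// LP encoding in the HPTE
 *		.avpnm	= 0x1UL,	// VA bit 23 is page offset, not AVPN
 *		.tlbiel	= 0,		// no tlbiel for 16M pages
 *	},
 */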
#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1

#ifndef __ASSEMBLY__

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize,
					  int ssize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
  162. /*
  163. * This function sets the ARPN, and LP fields of the HPTE appropriately
  164. * for the page size. We assume the pa is already "clean" that is properly
  165. * aligned for the requested page size
  166. */
  167. static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
  168. {
  169. unsigned long r;
  170. /* A 4K page needs no special encoding */
  171. if (psize == MMU_PAGE_4K)
  172. return pa & HPTE_R_RPN;
  173. else {
  174. unsigned int penc = mmu_psize_defs[psize].penc;
  175. unsigned int shift = mmu_psize_defs[psize].shift;
  176. return (pa & ~((1ul << shift) - 1)) | (penc << 12);
  177. }
  178. return r;
  179. }
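
/*
 * Typical use when inserting a hash PTE (a sketch modelled on the
 * native hash-table insert path; the vflags/rflags variables here
 * are illustrative, not part of this header):
 *
 *	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, psize) | rflags;
 */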
/*
 * Build a VA given VSID, EA and segment size
 */
static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
				   int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return (vsid << 28) | (ea & 0xfffffffUL);
	return (vsid << 40) | (ea & 0xffffffffffUL);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
				     int ssize)
{
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
	} else {
		vsid = va >> 40;
		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
	}
	return hash & 0x7fffffffffUL;
}
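
/*
 * How callers typically combine the helpers above (a sketch of the
 * usual pattern in the hash-fault code, not an exact quote): for a
 * 4K page (shift = 12) in a 256M segment,
 *
 *	va   = hpt_va(ea, vsid, MMU_SEGSIZE_256M);
 *	hash = hpt_hash(va, 12, MMU_SEGSIZE_256M);
 *	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *
 * selects the primary HPTE group; the secondary group is derived
 * from ~hash in the same way.
 */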
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(unsigned long addr, unsigned long page_size,
		      unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);

#endif /* __ASSEMBLY__ */
/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID". For kernel addresses this
 * is equal to the ESID; for user addresses it is:
 *
 *	(context << USER_ESID_BITS) | (esid & ((1 << USER_ESID_BITS) - 1))
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * where VSID_MULTIPLIER is a prime (200730139 for 256M segments) and
 * VSID_MODULUS = 2^VSID_BITS - 1 (2^36 - 1 = 0xFFFFFFFFF for 256M
 * segments).
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved. VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for USER_ESID_BITS (16) significant bits of ESID and
 * CONTEXT_BITS (19) bits of context for user addresses, i.e. 16T
 * (44 bits) of address space for up to 512K contexts (although the
 * page table structure and context allocation will need changes to
 * take advantage of this).
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results). The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
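
/*
 * Worked example (illustrative numbers): for context 0x123 and user
 * EA 0x10008000 in a 256M segment, the ESID is 0x10008000 >> 28 = 1,
 * so the proto-VSID is
 *
 *	(0x123 << 16) | 0x1 = 0x1230001
 *
 * which vsid_scramble() (defined below) then maps to the real VSID.
 */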
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
#define VSID_BITS_256M 36
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_1T 24
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS 19
#define USER_ESID_BITS 16
#define USER_ESID_BITS_1T 4

#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * - rt and rx must be different registers
 * - The answer will end up in the low VSID_BITS bits of rt. The higher
 *   bits may contain other garbage, so you may need to mask the
 *   result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^VSID_BITS-1), and lies between 0 and	\
	 * 2^VSID_BITS-1 + 2^28-1 (the multiplier is at most 28 bits).	\
	 * In particular, if rt >= 2^VSID_BITS-1, then rt+1 has the	\
	 * 2^VSID_BITS bit set. So, if rt+1 has the bit clear, rt	\
	 * already has the answer we want; if it doesn't, the answer	\
	 * is the low VSID_BITS bits of rt+1. So in all cases the	\
	 * answer is the low VSID_BITS bits of (rt + ((rt+1) >>	\
	 * VSID_BITS)) */						\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx
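
/*
 * Numeric illustration of the folding trick above (not kernel code;
 * the numbers are made up for the example): with VSID_BITS = 4 the
 * modulus is 15, and for x = 200:
 *
 *	exact:  200 % 15 == 5
 *	fold:   (200 >> 4) + (200 & 15) = 12 + 8 = 20
 *	fixup:  (20 + ((20 + 1) >> 4)) & 15 = 21 & 15 = 5
 *
 * The fold works because 2^n == 1 (mod 2^n - 1); the fixup handles
 * folded values that are still >= 2^n - 1.
 */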
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS (PAGE_SHIFT - 2)
#define SBP_L2_BITS (PAGE_SHIFT - 3)
#define SBP_L1_COUNT (1 << SBP_L1_BITS)
#define SBP_L2_COUNT (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
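
/*
 * A lookup down the tree goes roughly like this (a sketch of the walk
 * done by the sub-page protection code; the local variable names here
 * are invented for illustration):
 *
 *	unsigned int **p, *spp, prot_word;
 *	p         = spt->protptrs[addr >> SBP_L3_SHIFT];
 *	spp       = p[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 *	prot_word = spp[(addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 *
 * With 64k pages (PAGE_SHIFT = 16) each page of protection words
 * covers 2^14 words * 64k = 1GB, and each page of pointers covers
 * 2^13 * 1GB = 8TB, matching the comment above.
 */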
extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	u64 high_slices_psize;	/* 4 bits per slice for now */
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
/* This is only valid for addresses >= PAGE_OFFSET */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble(ea >> SID_SHIFT, 256M);
	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only used on legacy iSeries in lparmap.c,
 * hence the 256MB segment assumption.
 */
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER_256M) % \
				 VSID_MODULUS_256M)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */