hash_low_32.S

/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
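/*
 * (A rough C sketch of the handshake below, for reference:
 *
 *	msr = mfmsr(); mtmsr(msr & ~MSR_EE);	// interrupts off
 *	while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0)
 *		;				// spin until lock is free
 *	isync();
 *	eieio();
 *	mmu_hash_lock = 0;			// release immediately
 *	mtmsr(msr);
 *
 * cmpxchg() here only stands in for the lwarx/stwcx. retry loop;
 * the token 0x0fff0000 is the value loaded by lis r0,0x0fff.)
 */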
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)
	mtmsr	r10
	blr
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, r10, ctr, lr.
 */
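/*
 * (In outline, hash_page does roughly the following -- a sketch,
 * not exact C:
 *
 *	pte = walk_page_tables(va);		// pgdir/pmd walk below
 *	if (!pte || (access & ~*pte))
 *		return to caller;		// no mapping / no permission
 *	*pte |= _PAGE_ACCESSED | _PAGE_HASHPTE	// atomically, plus
 *		| (write ? _PAGE_DIRTY : 0);	// DIRTY on a write fault
 *	create_hpte();				// install hardware PTE
 *	rfi;					// return from exception
 *
 * walk_page_tables() is just a name for the inline walk done below,
 * not a call made by this code.)
 */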
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
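	/*
	 * (Roughly, the retry loop below does -- a sketch:
	 *
	 *	do {
	 *		old = *ptep;			// lwarx
	 *		if (access & ~old)
	 *			return;			// permission denied
	 *		new = old | r0_bits;		// ACCESSED/HASHPTE/DIRTY
	 *	} while (!store_conditional(ptep, new));	// stwcx.
	 * )
	 */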
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
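/*
 * (Worked example of the VSID computation done at the top of this
 * routine: with context = 1 and va = 0xa0001000, ESID = va >> 28 = 0xa,
 * so VSID = 1 * (897 * 16) + 0xa * 0x111 = 0x3810 + 0xaaa = 0x42ba;
 * create_hpte later trims the result to 24 bits.)
 */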
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */
	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
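	/*
	 * (In C terms, roughly -- a sketch:
	 *
	 *	do {
	 *		old = *ptep;			// lwarx
	 *		if (old & _PAGE_HASHPTE)
	 *			goto out;		// HPTE already exists
	 *	} while (!store_conditional(ptep, old | _PAGE_HASHPTE));
	 * )
	 */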
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).  r10 contains the upper half of
 * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
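/*
 * (For reference, the patched instruction groups below compute,
 * roughly -- a C sketch, with pi the 16-bit page index (va >> 12):
 *
 *	hash      = vsid ^ pi;
 *	primary   = Hash_base + ((hash << LG_PTEG_SIZE) & Hash_msk);
 *	secondary = primary ^ Hash_msk;		// i.e. ~hash, masked
 *
 * With the example values above, Hash_msk = 0x3ffc0, so each PTEG
 * is a 64-byte group of eight 8-byte HPTEs inside a 256kB table.)
 */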
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
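	/*
	 * (The resulting PP values, per the 32-bit PowerPC architecture:
	 *	PP = 0: supervisor read/write, no user access
	 *	PP = 2: supervisor and user read/write
	 *	PP = 3: supervisor and user read-only
	 * so kernel pages are always RW to the kernel, and user pages
	 * become RW only once both _PAGE_RW and _PAGE_DIRTY are set.)
	 */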
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */
	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */
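	/*
	 * (The round-robin eviction below is, roughly -- a sketch:
	 *
	 *	next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 *	slot = pteg_base + next_slot;
	 *	if (rpn_of(slot) < phys_addr_of(etext))
	 *		try the next slot;	// don't evict kernel text
	 *
	 * rpn_of() and phys_addr_of() are only names for the second-word
	 * load and the tophys() conversion done inline below.)
	 */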
1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,HPTE_SIZE		/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
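/*
 * (The store protocol below, in outline:
 *
 *	hpte->word0 = new_word0 & ~PTE_V;	// still invalid
 *	sync; tlbsync;
 *	hpte->word1 = new_word1;		// RPN, WIMG, PP
 *	sync;
 *	hpte->word0 = new_word0;		// only now valid
 *
 * so another CPU can never observe V set alongside a half-written
 * entry.)
 */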
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
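/*
 * (In outline -- a sketch, not exact C:
 *
 *	for (; count > 0; count--, va += 0x1000, pte++) {
 *		if (!test_and_clear(pte, _PAGE_HASHPTE))
 *			continue;
 *		find the matching HPTE in the primary/secondary PTEG;
 *		zero its first word;		// invalidate
 *		sync; tlbie(va); sync;
 *	}
 *
 * The VSID is computed once, for the first PTE found with
 * _PAGE_HASHPTE set.)
 */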
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
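	/*
	 * (Roughly -- a sketch:
	 *
	 *	do {
	 *		f = *flagsp;			// lwarx
	 *		if (!(f & _PAGE_HASHPTE))
	 *			goto next_pte;		// nothing to flush
	 *	} while (!store_conditional(flagsp, f & ~_PAGE_HASHPTE));
	 * )
	 */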
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */
	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr