/*
 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
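/*
 * hash_page_sync simply acquires and immediately releases the lock,
 * so it returns only once no CPU is still inside hash_page.  The
 * lock itself is a plain lwarx/stwcx. spinlock; roughly equivalent
 * C, as an illustrative sketch only (cmpxchg/store_conditional here
 * are descriptive names, not the kernel's API):
 *
 *	while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0)
 *		while (mmu_hash_lock != 0)
 *			;	// spin on plain loads, not reservations
 *	isync();		// don't let later accesses float above
 *	eieio();		// order prior stores before the unlock
 *	mmu_hash_lock = 0;
 */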
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)
	blr
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
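/*
 * Sketch of the overall flow, in C-like pseudocode (names here are
 * descriptive only, not real kernel symbols):
 *
 *	pgd = (addr >= KERNELBASE) ? swapper_pg_dir
 *				   : current->thread.pgdir;
 *	pmd = pgd[addr >> 22];			// top 10 bits
 *	if (pmd is empty)
 *		return to caller;		// no mapping
 *	pte = pte_page(pmd)[(addr >> 12) & 0x3ff];
 *	if (access & ~pte)
 *		return to caller;		// permission violation
 *	atomically set _PAGE_ACCESSED, _PAGE_HASHPTE
 *	    (and _PAGE_DIRTY on a write) in the pte;
 *	create_hpte();				// install in hash table
 *	return from the exception;
 */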
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

/*
 * Update the linux PTE atomically.  We do the lwarx up-front
 * because almost always, there won't be a permission violation
 * and there won't already be an HPTE, and thus we will have
 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
 */
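/*
 * Roughly what the retry loop below does, as a hedged C sketch
 * (store_conditional is a descriptive name for stwcx., not a
 * kernel helper):
 *
 *	do {
 *		old = *pte;			// lwarx: load + reserve
 *		if (access & ~old)
 *			return;			// permission violation
 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
 *			  | (write ? _PAGE_DIRTY : 0);
 *	} while (!store_conditional(pte, new));	// retry if stwcx. fails
 */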
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

/*
 * htab_reloads counts the number of times we have to fault an
 * HPTE into the hash table.  This should only happen after a
 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
 * Where a page is faulted into a process's address space,
 * update_mmu_cache gets called to put the HPTE into the hash table
 * and those are counted as preloads rather than reloads.
 */
	addis	r8,r7,htab_reloads@ha
	lwz	r3,htab_reloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_reloads@l(r8)

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
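/*
 * In C terms, the VSID computed above is (this restates the
 * constants in the instructions, it is not a separate definition):
 *
 *	vsid = (context * 897 * 16		// context skew, 0x3810
 *		+ ((va >> 28) & 0xf) * 0x111)	// ESID skew
 *	       & 0xffffff;			// trimmed by create_hpte
 */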
#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */
/*
 * We disable interrupts here, even on UP, because we don't
 * want to race with hash_page, and because we want the
 * _PAGE_HASHPTE bit to be a reliable indication of whether
 * the HPTE exists (or at least whether one did once).
 * We also turn off the MMU for data accesses so that we
 * can't take a hash table miss (assuming the code is
 * covered by a BAT).  -- paulus
 */
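/*
 * Note on the rlwinm masks below (IBM bit numbering, bit 0 is the
 * MSB): mask 17..15 wraps around and keeps every bit except bit 16,
 * which is MSR_EE (0x8000); mask 28..26 keeps every bit except bit
 * 27, which is MSR_DR (0x10).  A wrapped mask is how rlwinm encodes
 * "all bits except one".
 */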
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

/*
 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
 * If _PAGE_HASHPTE was already set, we don't replace the existing
 * HPTE, so we just unlock and return.
 */
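/*
 * A hedged C sketch of the test-and-set below (illustrative
 * pseudocode; store_conditional stands for stwcx., it is not a
 * kernel helper):
 *
 *	do {
 *		old = *pte;			// lwarx
 *		if (old & _PAGE_HASHPTE)
 *			goto unlock_and_return;	// HPTE already exists
 *	} while (!store_conditional(pte, old | _PAGE_HASHPTE));
 *	create_hpte();
 */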
	mr	r8,r5
	rlwimi	r8,r4,22,20,29
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

	addis	r8,r7,htab_preloads@ha
	lwz	r3,htab_preloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12			/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
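/*
 * With these definitions, the primary PTEG address built by
 * hash_page_patch_A below works out to the following illustrative
 * C expression (a restatement of the rlwimi/rlwinm/xor sequence,
 * not a separate API):
 *
 *	pteg = Hash_base +
 *	       (((vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1))
 *		<< LG_PTEG_SIZE);
 *
 * i.e. the low Hash_bits bits of VSID XOR page-index select one of
 * the 64-byte PTE groups.
 */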
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
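	/*
	 * Resulting PP bits (this just tabulates the expression above):
	 *
	 *	kernel page, any access:	PP = 0	(kernel rw)
	 *	user page, rw & dirty:		PP = 2	(user rw)
	 *	user page, read-only or clean:	PP = 3	(user ro)
	 */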
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
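	/*
	 * Taken together, the xoris/xori pair XORs the primary PTEG
	 * address with Hash_msk (0x3ffc0 in the example values above),
	 * i.e. it flips all the hash bits: the secondary hash is the
	 * one's complement of the primary hash, as the 32-bit hashed
	 * page table architecture defines it.
	 */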
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
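	/*
	 * The eviction policy below is a simple kernel-wide round-robin
	 * over the 8 slots of the primary PTEG; in C terms (restating
	 * the code; next_slot is the .bss word defined further down):
	 *
	 *	next_slot = (next_slot + PTE_SIZE) & (7 * PTE_SIZE);
	 *	victim = pteg_base + next_slot;
	 */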
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6

	/* update counter of evicted pages */
	addis	r6,r7,htab_evicts@ha
	lwz	r3,htab_evicts@l(r6)
	addi	r3,r3,1
	stw	r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
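/*
 * The store sequence below keeps the HPTE architecturally consistent
 * at every step; sketched in C-like pseudocode (illustrative only):
 *
 *	hpte->hi = new_hi & ~PTE_V;	// invalidate first
 *	sync(); tlbsync();		// wait out other CPUs' lookups
 *	hpte->lo = new_lo;		// RPN, WIMG, PP bits
 *	sync();
 *	hpte->hi = new_hi | PTE_V;	// set V only once lo is visible
 */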
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
	.globl	primary_pteg_full
primary_pteg_full:
	.space	4
	.globl	htab_hash_searches
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
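/*
 * Hedged sketch of the overall flow, in C-like pseudocode
 * (illustrative names, not kernel helpers):
 *
 *	for (i = 0; i < count; i++, va += 0x1000, pte++) {
 *		if (!(*pte & _PAGE_HASHPTE))
 *			continue;
 *		atomically clear _PAGE_HASHPTE in *pte;
 *		find the matching HPTE in the primary or secondary
 *		    PTEG and clear its V bit;
 *		tlbie(va);
 *	}
 */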
_GLOBAL(flush_hash_pages)
	tophys(r7,0)
/*
 * We disable interrupts here, even on UP, because we want
 * the _PAGE_HASHPTE bit to be a reliable indication of
 * whether the HPTE exists (or at least whether one did once).
 * We also turn off the MMU for data accesses so that we
 * can't take a hash table miss (assuming the code is
 * covered by a BAT).  -- paulus
 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,4
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4			/* advance to next pte */
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr