/* arch/parisc/kernel/pacache.S */
/*
 *  PARISC TLB and cache flushing support
 *  Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 *  Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 *  Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *	 should only use index and base registers that are not shadowed,
 *	 so that the fast path emulation in the non-access miss handler
 *	 can be used.
 */
  27. #ifdef CONFIG_64BIT
  28. .level 2.0w
  29. #else
  30. .level 2.0
  31. #endif
  32. #include <asm/psw.h>
  33. #include <asm/assembly.h>
  34. #include <asm/pgtable.h>
  35. #include <asm/cache.h>
  36. #include <linux/linkage.h>
  37. #include <linux/init.h>
  38. __HEAD
  39. .align 128
  40. ENTRY(flush_tlb_all_local)
  41. .proc
  42. .callinfo NO_CALLS
  43. .entry
  44. /*
  45. * The pitlbe and pdtlbe instructions should only be used to
  46. * flush the entire tlb. Also, there needs to be no intervening
  47. * tlb operations, e.g. tlb misses, so the operation needs
  48. * to happen in real mode with all interruptions disabled.
  49. */
  50. /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
  51. rsm PSW_SM_I, %r19 /* save I-bit state */
  52. load32 PA(1f), %r1
  53. nop
  54. nop
  55. nop
  56. nop
  57. nop
  58. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  59. mtctl %r0, %cr17 /* Clear IIASQ tail */
  60. mtctl %r0, %cr17 /* Clear IIASQ head */
  61. mtctl %r1, %cr18 /* IIAOQ head */
  62. ldo 4(%r1), %r1
  63. mtctl %r1, %cr18 /* IIAOQ tail */
  64. load32 REAL_MODE_PSW, %r1
  65. mtctl %r1, %ipsw
  66. rfi
  67. nop
  68. 1: load32 PA(cache_info), %r1
  69. /* Flush Instruction Tlb */
  70. LDREG ITLB_SID_BASE(%r1), %r20
  71. LDREG ITLB_SID_STRIDE(%r1), %r21
  72. LDREG ITLB_SID_COUNT(%r1), %r22
  73. LDREG ITLB_OFF_BASE(%r1), %arg0
  74. LDREG ITLB_OFF_STRIDE(%r1), %arg1
  75. LDREG ITLB_OFF_COUNT(%r1), %arg2
  76. LDREG ITLB_LOOP(%r1), %arg3
  77. addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
  78. movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
  79. copy %arg0, %r28 /* Init base addr */
  80. fitmanyloop: /* Loop if LOOP >= 2 */
  81. mtsp %r20, %sr1
  82. add %r21, %r20, %r20 /* increment space */
  83. copy %arg2, %r29 /* Init middle loop count */
  84. fitmanymiddle: /* Loop if LOOP >= 2 */
  85. addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
  86. pitlbe 0(%sr1, %r28)
  87. pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
  88. addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
  89. copy %arg3, %r31 /* Re-init inner loop count */
  90. movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
  91. addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
  92. fitoneloop: /* Loop if LOOP = 1 */
  93. mtsp %r20, %sr1
  94. copy %arg0, %r28 /* init base addr */
  95. copy %arg2, %r29 /* init middle loop count */
  96. fitonemiddle: /* Loop if LOOP = 1 */
  97. addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
  98. pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
  99. addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
  100. add %r21, %r20, %r20 /* increment space */
  101. fitdone:
  102. /* Flush Data Tlb */
  103. LDREG DTLB_SID_BASE(%r1), %r20
  104. LDREG DTLB_SID_STRIDE(%r1), %r21
  105. LDREG DTLB_SID_COUNT(%r1), %r22
  106. LDREG DTLB_OFF_BASE(%r1), %arg0
  107. LDREG DTLB_OFF_STRIDE(%r1), %arg1
  108. LDREG DTLB_OFF_COUNT(%r1), %arg2
  109. LDREG DTLB_LOOP(%r1), %arg3
  110. addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
  111. movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
  112. copy %arg0, %r28 /* Init base addr */
  113. fdtmanyloop: /* Loop if LOOP >= 2 */
  114. mtsp %r20, %sr1
  115. add %r21, %r20, %r20 /* increment space */
  116. copy %arg2, %r29 /* Init middle loop count */
  117. fdtmanymiddle: /* Loop if LOOP >= 2 */
  118. addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
  119. pdtlbe 0(%sr1, %r28)
  120. pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
  121. addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
  122. copy %arg3, %r31 /* Re-init inner loop count */
  123. movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
  124. addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
  125. fdtoneloop: /* Loop if LOOP = 1 */
  126. mtsp %r20, %sr1
  127. copy %arg0, %r28 /* init base addr */
  128. copy %arg2, %r29 /* init middle loop count */
  129. fdtonemiddle: /* Loop if LOOP = 1 */
  130. addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
  131. pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
  132. addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
  133. add %r21, %r20, %r20 /* increment space */
  134. fdtdone:
  135. /*
  136. * Switch back to virtual mode
  137. */
  138. /* pcxt_ssm_bug */
  139. rsm PSW_SM_I, %r0
  140. load32 2f, %r1
  141. nop
  142. nop
  143. nop
  144. nop
  145. nop
  146. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  147. mtctl %r0, %cr17 /* Clear IIASQ tail */
  148. mtctl %r0, %cr17 /* Clear IIASQ head */
  149. mtctl %r1, %cr18 /* IIAOQ head */
  150. ldo 4(%r1), %r1
  151. mtctl %r1, %cr18 /* IIAOQ tail */
  152. load32 KERNEL_PSW, %r1
  153. or %r1, %r19, %r1 /* I-bit to state on entry */
  154. mtctl %r1, %ipsw /* restore I-bit (entire PSW) */
  155. rfi
  156. nop
  157. 2: bv %r0(%r2)
  158. nop
  159. .exit
  160. .procend
  161. ENDPROC(flush_tlb_all_local)
  162. .import cache_info,data
  163. ENTRY(flush_instruction_cache_local)
  164. .proc
  165. .callinfo NO_CALLS
  166. .entry
  167. mtsp %r0, %sr1
  168. load32 cache_info, %r1
  169. /* Flush Instruction Cache */
  170. LDREG ICACHE_BASE(%r1), %arg0
  171. LDREG ICACHE_STRIDE(%r1), %arg1
  172. LDREG ICACHE_COUNT(%r1), %arg2
  173. LDREG ICACHE_LOOP(%r1), %arg3
  174. rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
  175. addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
  176. movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
  177. fimanyloop: /* Loop if LOOP >= 2 */
  178. addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
  179. fice %r0(%sr1, %arg0)
  180. fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
  181. movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
  182. addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
  183. fioneloop: /* Loop if LOOP = 1 */
  184. addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */
  185. fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
  186. fisync:
  187. sync
  188. mtsm %r22 /* restore I-bit */
  189. bv %r0(%r2)
  190. nop
  191. .exit
  192. .procend
  193. ENDPROC(flush_instruction_cache_local)
  194. .import cache_info, data
  195. ENTRY(flush_data_cache_local)
  196. .proc
  197. .callinfo NO_CALLS
  198. .entry
  199. mtsp %r0, %sr1
  200. load32 cache_info, %r1
  201. /* Flush Data Cache */
  202. LDREG DCACHE_BASE(%r1), %arg0
  203. LDREG DCACHE_STRIDE(%r1), %arg1
  204. LDREG DCACHE_COUNT(%r1), %arg2
  205. LDREG DCACHE_LOOP(%r1), %arg3
  206. rsm PSW_SM_I, %r22
  207. addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
  208. movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
  209. fdmanyloop: /* Loop if LOOP >= 2 */
  210. addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
  211. fdce %r0(%sr1, %arg0)
  212. fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
  213. movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
  214. addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
  215. fdoneloop: /* Loop if LOOP = 1 */
  216. addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */
  217. fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
  218. fdsync:
  219. syncdma
  220. sync
  221. mtsm %r22 /* restore I-bit */
  222. bv %r0(%r2)
  223. nop
  224. .exit
  225. .procend
  226. ENDPROC(flush_data_cache_local)
  227. .align 16
  228. ENTRY(copy_user_page_asm)
  229. .proc
  230. .callinfo NO_CALLS
  231. .entry
  232. #ifdef CONFIG_64BIT
  233. /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
  234. * Unroll the loop by hand and arrange insn appropriately.
  235. * GCC probably can do this just as well.
  236. */
  237. ldd 0(%r25), %r19
  238. ldi (PAGE_SIZE / 128), %r1
  239. ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
  240. ldw 128(%r25), %r0 /* prefetch 2 */
  241. 1: ldd 8(%r25), %r20
  242. ldw 192(%r25), %r0 /* prefetch 3 */
  243. ldw 256(%r25), %r0 /* prefetch 4 */
  244. ldd 16(%r25), %r21
  245. ldd 24(%r25), %r22
  246. std %r19, 0(%r26)
  247. std %r20, 8(%r26)
  248. ldd 32(%r25), %r19
  249. ldd 40(%r25), %r20
  250. std %r21, 16(%r26)
  251. std %r22, 24(%r26)
  252. ldd 48(%r25), %r21
  253. ldd 56(%r25), %r22
  254. std %r19, 32(%r26)
  255. std %r20, 40(%r26)
  256. ldd 64(%r25), %r19
  257. ldd 72(%r25), %r20
  258. std %r21, 48(%r26)
  259. std %r22, 56(%r26)
  260. ldd 80(%r25), %r21
  261. ldd 88(%r25), %r22
  262. std %r19, 64(%r26)
  263. std %r20, 72(%r26)
  264. ldd 96(%r25), %r19
  265. ldd 104(%r25), %r20
  266. std %r21, 80(%r26)
  267. std %r22, 88(%r26)
  268. ldd 112(%r25), %r21
  269. ldd 120(%r25), %r22
  270. std %r19, 96(%r26)
  271. std %r20, 104(%r26)
  272. ldo 128(%r25), %r25
  273. std %r21, 112(%r26)
  274. std %r22, 120(%r26)
  275. ldo 128(%r26), %r26
  276. /* conditional branches nullify on forward taken branch, and on
  277. * non-taken backward branch. Note that .+4 is a backwards branch.
  278. * The ldd should only get executed if the branch is taken.
  279. */
  280. addib,COND(>),n -1, %r1, 1b /* bundle 10 */
  281. ldd 0(%r25), %r19 /* start next loads */
  282. #else
  283. /*
  284. * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
  285. * bundles (very restricted rules for bundling).
  286. * Note that until (if) we start saving
  287. * the full 64 bit register values on interrupt, we can't
  288. * use ldd/std on a 32 bit kernel.
  289. */
  290. ldw 0(%r25), %r19
  291. ldi (PAGE_SIZE / 64), %r1
  292. 1:
  293. ldw 4(%r25), %r20
  294. ldw 8(%r25), %r21
  295. ldw 12(%r25), %r22
  296. stw %r19, 0(%r26)
  297. stw %r20, 4(%r26)
  298. stw %r21, 8(%r26)
  299. stw %r22, 12(%r26)
  300. ldw 16(%r25), %r19
  301. ldw 20(%r25), %r20
  302. ldw 24(%r25), %r21
  303. ldw 28(%r25), %r22
  304. stw %r19, 16(%r26)
  305. stw %r20, 20(%r26)
  306. stw %r21, 24(%r26)
  307. stw %r22, 28(%r26)
  308. ldw 32(%r25), %r19
  309. ldw 36(%r25), %r20
  310. ldw 40(%r25), %r21
  311. ldw 44(%r25), %r22
  312. stw %r19, 32(%r26)
  313. stw %r20, 36(%r26)
  314. stw %r21, 40(%r26)
  315. stw %r22, 44(%r26)
  316. ldw 48(%r25), %r19
  317. ldw 52(%r25), %r20
  318. ldw 56(%r25), %r21
  319. ldw 60(%r25), %r22
  320. stw %r19, 48(%r26)
  321. stw %r20, 52(%r26)
  322. ldo 64(%r25), %r25
  323. stw %r21, 56(%r26)
  324. stw %r22, 60(%r26)
  325. ldo 64(%r26), %r26
  326. addib,COND(>),n -1, %r1, 1b
  327. ldw 0(%r25), %r19
  328. #endif
  329. bv %r0(%r2)
  330. nop
  331. .exit
  332. .procend
  333. ENDPROC(copy_user_page_asm)
  334. /*
  335. * NOTE: Code in clear_user_page has a hard coded dependency on the
  336. * maximum alias boundary being 4 Mb. We've been assured by the
  337. * parisc chip designers that there will not ever be a parisc
  338. * chip with a larger alias boundary (Never say never :-) ).
  339. *
  340. * Subtle: the dtlb miss handlers support the temp alias region by
  341. * "knowing" that if a dtlb miss happens within the temp alias
  342. * region it must have occurred while in clear_user_page. Since
  343. * this routine makes use of processor local translations, we
  344. * don't want to insert them into the kernel page table. Instead,
  345. * we load up some general registers (they need to be registers
  346. * which aren't shadowed) with the physical page numbers (preshifted
  347. * for tlb insertion) needed to insert the translations. When we
  348. * miss on the translation, the dtlb miss handler inserts the
  349. * translation into the tlb using these values:
  350. *
  351. * %r26 physical page (shifted for tlb insert) of "to" translation
  352. * %r23 physical page (shifted for tlb insert) of "from" translation
  353. */
  354. #if 0
  355. /*
  356. * We can't do this since copy_user_page is used to bring in
  357. * file data that might have instructions. Since the data would
  358. * then need to be flushed out so the i-fetch can see it, it
  359. * makes more sense to just copy through the kernel translation
  360. * and flush it.
  361. *
  362. * I'm still keeping this around because it may be possible to
  363. * use it if more information is passed into copy_user_page().
  364. * Have to do some measurements to see if it is worthwhile to
  365. * lobby for such a change.
  366. */
  367. ENTRY(copy_user_page_asm)
  368. .proc
  369. .callinfo NO_CALLS
  370. .entry
  371. ldil L%(__PAGE_OFFSET), %r1
  372. sub %r26, %r1, %r26
  373. sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
  374. ldil L%(TMPALIAS_MAP_START), %r28
  375. /* FIXME for different page sizes != 4k */
  376. #ifdef CONFIG_64BIT
  377. extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
  378. extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
  379. depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
  380. depdi 0, 63,12, %r28 /* Clear any offset bits */
  381. copy %r28, %r29
  382. depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
  383. #else
  384. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  385. extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
  386. depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
  387. depwi 0, 31,12, %r28 /* Clear any offset bits */
  388. copy %r28, %r29
  389. depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
  390. #endif
  391. /* Purge any old translations */
  392. pdtlb 0(%r28)
  393. pdtlb 0(%r29)
  394. ldi 64, %r1
  395. /*
  396. * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
  397. * bundles (very restricted rules for bundling). It probably
  398. * does OK on PCXU and better, but we could do better with
  399. * ldd/std instructions. Note that until (if) we start saving
  400. * the full 64 bit register values on interrupt, we can't
  401. * use ldd/std on a 32 bit kernel.
  402. */
  403. 1:
  404. ldw 0(%r29), %r19
  405. ldw 4(%r29), %r20
  406. ldw 8(%r29), %r21
  407. ldw 12(%r29), %r22
  408. stw %r19, 0(%r28)
  409. stw %r20, 4(%r28)
  410. stw %r21, 8(%r28)
  411. stw %r22, 12(%r28)
  412. ldw 16(%r29), %r19
  413. ldw 20(%r29), %r20
  414. ldw 24(%r29), %r21
  415. ldw 28(%r29), %r22
  416. stw %r19, 16(%r28)
  417. stw %r20, 20(%r28)
  418. stw %r21, 24(%r28)
  419. stw %r22, 28(%r28)
  420. ldw 32(%r29), %r19
  421. ldw 36(%r29), %r20
  422. ldw 40(%r29), %r21
  423. ldw 44(%r29), %r22
  424. stw %r19, 32(%r28)
  425. stw %r20, 36(%r28)
  426. stw %r21, 40(%r28)
  427. stw %r22, 44(%r28)
  428. ldw 48(%r29), %r19
  429. ldw 52(%r29), %r20
  430. ldw 56(%r29), %r21
  431. ldw 60(%r29), %r22
  432. stw %r19, 48(%r28)
  433. stw %r20, 52(%r28)
  434. stw %r21, 56(%r28)
  435. stw %r22, 60(%r28)
  436. ldo 64(%r28), %r28
  437. addib,COND(>) -1, %r1,1b
  438. ldo 64(%r29), %r29
  439. bv %r0(%r2)
  440. nop
  441. .exit
  442. .procend
  443. ENDPROC(copy_user_page_asm)
  444. #endif
  445. ENTRY(__clear_user_page_asm)
  446. .proc
  447. .callinfo NO_CALLS
  448. .entry
  449. tophys_r1 %r26
  450. ldil L%(TMPALIAS_MAP_START), %r28
  451. #ifdef CONFIG_64BIT
  452. #if (TMPALIAS_MAP_START >= 0x80000000)
  453. depdi 0, 31,32, %r28 /* clear any sign extension */
  454. /* FIXME: page size dependend */
  455. #endif
  456. extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
  457. depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
  458. depdi 0, 63,12, %r28 /* Clear any offset bits */
  459. #else
  460. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  461. depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
  462. depwi 0, 31,12, %r28 /* Clear any offset bits */
  463. #endif
  464. /* Purge any old translation */
  465. pdtlb 0(%r28)
  466. #ifdef CONFIG_64BIT
  467. ldi (PAGE_SIZE / 128), %r1
  468. /* PREFETCH (Write) has not (yet) been proven to help here */
  469. /* #define PREFETCHW_OP ldd 256(%0), %r0 */
  470. 1: std %r0, 0(%r28)
  471. std %r0, 8(%r28)
  472. std %r0, 16(%r28)
  473. std %r0, 24(%r28)
  474. std %r0, 32(%r28)
  475. std %r0, 40(%r28)
  476. std %r0, 48(%r28)
  477. std %r0, 56(%r28)
  478. std %r0, 64(%r28)
  479. std %r0, 72(%r28)
  480. std %r0, 80(%r28)
  481. std %r0, 88(%r28)
  482. std %r0, 96(%r28)
  483. std %r0, 104(%r28)
  484. std %r0, 112(%r28)
  485. std %r0, 120(%r28)
  486. addib,COND(>) -1, %r1, 1b
  487. ldo 128(%r28), %r28
  488. #else /* ! CONFIG_64BIT */
  489. ldi (PAGE_SIZE / 64), %r1
  490. 1:
  491. stw %r0, 0(%r28)
  492. stw %r0, 4(%r28)
  493. stw %r0, 8(%r28)
  494. stw %r0, 12(%r28)
  495. stw %r0, 16(%r28)
  496. stw %r0, 20(%r28)
  497. stw %r0, 24(%r28)
  498. stw %r0, 28(%r28)
  499. stw %r0, 32(%r28)
  500. stw %r0, 36(%r28)
  501. stw %r0, 40(%r28)
  502. stw %r0, 44(%r28)
  503. stw %r0, 48(%r28)
  504. stw %r0, 52(%r28)
  505. stw %r0, 56(%r28)
  506. stw %r0, 60(%r28)
  507. addib,COND(>) -1, %r1, 1b
  508. ldo 64(%r28), %r28
  509. #endif /* CONFIG_64BIT */
  510. bv %r0(%r2)
  511. nop
  512. .exit
  513. .procend
  514. ENDPROC(__clear_user_page_asm)
  515. ENTRY(flush_kernel_dcache_page_asm)
  516. .proc
  517. .callinfo NO_CALLS
  518. .entry
  519. ldil L%dcache_stride, %r1
  520. ldw R%dcache_stride(%r1), %r23
  521. #ifdef CONFIG_64BIT
  522. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  523. #else
  524. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  525. #endif
  526. add %r26, %r25, %r25
  527. sub %r25, %r23, %r25
  528. 1: fdc,m %r23(%r26)
  529. fdc,m %r23(%r26)
  530. fdc,m %r23(%r26)
  531. fdc,m %r23(%r26)
  532. fdc,m %r23(%r26)
  533. fdc,m %r23(%r26)
  534. fdc,m %r23(%r26)
  535. fdc,m %r23(%r26)
  536. fdc,m %r23(%r26)
  537. fdc,m %r23(%r26)
  538. fdc,m %r23(%r26)
  539. fdc,m %r23(%r26)
  540. fdc,m %r23(%r26)
  541. fdc,m %r23(%r26)
  542. fdc,m %r23(%r26)
  543. cmpb,COND(<<) %r26, %r25,1b
  544. fdc,m %r23(%r26)
  545. sync
  546. bv %r0(%r2)
  547. nop
  548. .exit
  549. .procend
  550. ENDPROC(flush_kernel_dcache_page_asm)
  551. ENTRY(flush_user_dcache_page)
  552. .proc
  553. .callinfo NO_CALLS
  554. .entry
  555. ldil L%dcache_stride, %r1
  556. ldw R%dcache_stride(%r1), %r23
  557. #ifdef CONFIG_64BIT
  558. depdi,z 1,63-PAGE_SHIFT,1, %r25
  559. #else
  560. depwi,z 1,31-PAGE_SHIFT,1, %r25
  561. #endif
  562. add %r26, %r25, %r25
  563. sub %r25, %r23, %r25
  564. 1: fdc,m %r23(%sr3, %r26)
  565. fdc,m %r23(%sr3, %r26)
  566. fdc,m %r23(%sr3, %r26)
  567. fdc,m %r23(%sr3, %r26)
  568. fdc,m %r23(%sr3, %r26)
  569. fdc,m %r23(%sr3, %r26)
  570. fdc,m %r23(%sr3, %r26)
  571. fdc,m %r23(%sr3, %r26)
  572. fdc,m %r23(%sr3, %r26)
  573. fdc,m %r23(%sr3, %r26)
  574. fdc,m %r23(%sr3, %r26)
  575. fdc,m %r23(%sr3, %r26)
  576. fdc,m %r23(%sr3, %r26)
  577. fdc,m %r23(%sr3, %r26)
  578. fdc,m %r23(%sr3, %r26)
  579. cmpb,COND(<<) %r26, %r25,1b
  580. fdc,m %r23(%sr3, %r26)
  581. sync
  582. bv %r0(%r2)
  583. nop
  584. .exit
  585. .procend
  586. ENDPROC(flush_user_dcache_page)
  587. ENTRY(flush_user_icache_page)
  588. .proc
  589. .callinfo NO_CALLS
  590. .entry
  591. ldil L%dcache_stride, %r1
  592. ldw R%dcache_stride(%r1), %r23
  593. #ifdef CONFIG_64BIT
  594. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  595. #else
  596. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  597. #endif
  598. add %r26, %r25, %r25
  599. sub %r25, %r23, %r25
  600. 1: fic,m %r23(%sr3, %r26)
  601. fic,m %r23(%sr3, %r26)
  602. fic,m %r23(%sr3, %r26)
  603. fic,m %r23(%sr3, %r26)
  604. fic,m %r23(%sr3, %r26)
  605. fic,m %r23(%sr3, %r26)
  606. fic,m %r23(%sr3, %r26)
  607. fic,m %r23(%sr3, %r26)
  608. fic,m %r23(%sr3, %r26)
  609. fic,m %r23(%sr3, %r26)
  610. fic,m %r23(%sr3, %r26)
  611. fic,m %r23(%sr3, %r26)
  612. fic,m %r23(%sr3, %r26)
  613. fic,m %r23(%sr3, %r26)
  614. fic,m %r23(%sr3, %r26)
  615. cmpb,COND(<<) %r26, %r25,1b
  616. fic,m %r23(%sr3, %r26)
  617. sync
  618. bv %r0(%r2)
  619. nop
  620. .exit
  621. .procend
  622. ENDPROC(flush_user_icache_page)
  623. ENTRY(purge_kernel_dcache_page)
  624. .proc
  625. .callinfo NO_CALLS
  626. .entry
  627. ldil L%dcache_stride, %r1
  628. ldw R%dcache_stride(%r1), %r23
  629. #ifdef CONFIG_64BIT
  630. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  631. #else
  632. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  633. #endif
  634. add %r26, %r25, %r25
  635. sub %r25, %r23, %r25
  636. 1: pdc,m %r23(%r26)
  637. pdc,m %r23(%r26)
  638. pdc,m %r23(%r26)
  639. pdc,m %r23(%r26)
  640. pdc,m %r23(%r26)
  641. pdc,m %r23(%r26)
  642. pdc,m %r23(%r26)
  643. pdc,m %r23(%r26)
  644. pdc,m %r23(%r26)
  645. pdc,m %r23(%r26)
  646. pdc,m %r23(%r26)
  647. pdc,m %r23(%r26)
  648. pdc,m %r23(%r26)
  649. pdc,m %r23(%r26)
  650. pdc,m %r23(%r26)
  651. cmpb,COND(<<) %r26, %r25, 1b
  652. pdc,m %r23(%r26)
  653. sync
  654. bv %r0(%r2)
  655. nop
  656. .exit
  657. .procend
  658. ENDPROC(purge_kernel_dcache_page)
  659. #if 0
  660. /* Currently not used, but it still is a possible alternate
  661. * solution.
  662. */
  663. ENTRY(flush_alias_page)
  664. .proc
  665. .callinfo NO_CALLS
  666. .entry
  667. tophys_r1 %r26
  668. ldil L%(TMPALIAS_MAP_START), %r28
  669. #ifdef CONFIG_64BIT
  670. extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
  671. depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
  672. depdi 0, 63,12, %r28 /* Clear any offset bits */
  673. #else
  674. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  675. depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
  676. depwi 0, 31,12, %r28 /* Clear any offset bits */
  677. #endif
  678. /* Purge any old translation */
  679. pdtlb 0(%r28)
  680. ldil L%dcache_stride, %r1
  681. ldw R%dcache_stride(%r1), %r23
  682. #ifdef CONFIG_64BIT
  683. depdi,z 1, 63-PAGE_SHIFT,1, %r29
  684. #else
  685. depwi,z 1, 31-PAGE_SHIFT,1, %r29
  686. #endif
  687. add %r28, %r29, %r29
  688. sub %r29, %r23, %r29
  689. 1: fdc,m %r23(%r28)
  690. fdc,m %r23(%r28)
  691. fdc,m %r23(%r28)
  692. fdc,m %r23(%r28)
  693. fdc,m %r23(%r28)
  694. fdc,m %r23(%r28)
  695. fdc,m %r23(%r28)
  696. fdc,m %r23(%r28)
  697. fdc,m %r23(%r28)
  698. fdc,m %r23(%r28)
  699. fdc,m %r23(%r28)
  700. fdc,m %r23(%r28)
  701. fdc,m %r23(%r28)
  702. fdc,m %r23(%r28)
  703. fdc,m %r23(%r28)
  704. cmpb,COND(<<) %r28, %r29, 1b
  705. fdc,m %r23(%r28)
  706. sync
  707. bv %r0(%r2)
  708. nop
  709. .exit
  710. .procend
  711. #endif
  712. .export flush_user_dcache_range_asm
  713. flush_user_dcache_range_asm:
  714. .proc
  715. .callinfo NO_CALLS
  716. .entry
  717. ldil L%dcache_stride, %r1
  718. ldw R%dcache_stride(%r1), %r23
  719. ldo -1(%r23), %r21
  720. ANDCM %r26, %r21, %r26
  721. 1: cmpb,COND(<<),n %r26, %r25, 1b
  722. fdc,m %r23(%sr3, %r26)
  723. sync
  724. bv %r0(%r2)
  725. nop
  726. .exit
  727. .procend
  728. ENDPROC(flush_alias_page)
  729. ENTRY(flush_kernel_dcache_range_asm)
  730. .proc
  731. .callinfo NO_CALLS
  732. .entry
  733. ldil L%dcache_stride, %r1
  734. ldw R%dcache_stride(%r1), %r23
  735. ldo -1(%r23), %r21
  736. ANDCM %r26, %r21, %r26
  737. 1: cmpb,COND(<<),n %r26, %r25,1b
  738. fdc,m %r23(%r26)
  739. sync
  740. syncdma
  741. bv %r0(%r2)
  742. nop
  743. .exit
  744. .procend
  745. ENDPROC(flush_kernel_dcache_range_asm)
  746. ENTRY(flush_user_icache_range_asm)
  747. .proc
  748. .callinfo NO_CALLS
  749. .entry
  750. ldil L%icache_stride, %r1
  751. ldw R%icache_stride(%r1), %r23
  752. ldo -1(%r23), %r21
  753. ANDCM %r26, %r21, %r26
  754. 1: cmpb,COND(<<),n %r26, %r25,1b
  755. fic,m %r23(%sr3, %r26)
  756. sync
  757. bv %r0(%r2)
  758. nop
  759. .exit
  760. .procend
  761. ENDPROC(flush_user_icache_range_asm)
  762. ENTRY(flush_kernel_icache_page)
  763. .proc
  764. .callinfo NO_CALLS
  765. .entry
  766. ldil L%icache_stride, %r1
  767. ldw R%icache_stride(%r1), %r23
  768. #ifdef CONFIG_64BIT
  769. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  770. #else
  771. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  772. #endif
  773. add %r26, %r25, %r25
  774. sub %r25, %r23, %r25
  775. 1: fic,m %r23(%sr4, %r26)
  776. fic,m %r23(%sr4, %r26)
  777. fic,m %r23(%sr4, %r26)
  778. fic,m %r23(%sr4, %r26)
  779. fic,m %r23(%sr4, %r26)
  780. fic,m %r23(%sr4, %r26)
  781. fic,m %r23(%sr4, %r26)
  782. fic,m %r23(%sr4, %r26)
  783. fic,m %r23(%sr4, %r26)
  784. fic,m %r23(%sr4, %r26)
  785. fic,m %r23(%sr4, %r26)
  786. fic,m %r23(%sr4, %r26)
  787. fic,m %r23(%sr4, %r26)
  788. fic,m %r23(%sr4, %r26)
  789. fic,m %r23(%sr4, %r26)
  790. cmpb,COND(<<) %r26, %r25, 1b
  791. fic,m %r23(%sr4, %r26)
  792. sync
  793. bv %r0(%r2)
  794. nop
  795. .exit
  796. .procend
  797. ENDPROC(flush_kernel_icache_page)
  798. ENTRY(flush_kernel_icache_range_asm)
  799. .proc
  800. .callinfo NO_CALLS
  801. .entry
  802. ldil L%icache_stride, %r1
  803. ldw R%icache_stride(%r1), %r23
  804. ldo -1(%r23), %r21
  805. ANDCM %r26, %r21, %r26
  806. 1: cmpb,COND(<<),n %r26, %r25, 1b
  807. fic,m %r23(%sr4, %r26)
  808. sync
  809. bv %r0(%r2)
  810. nop
  811. .exit
  812. .procend
  813. ENDPROC(flush_kernel_icache_range_asm)
  814. /* align should cover use of rfi in disable_sr_hashing_asm and
  815. * srdis_done.
  816. */
  817. .align 256
  818. ENTRY(disable_sr_hashing_asm)
  819. .proc
  820. .callinfo NO_CALLS
  821. .entry
  822. /*
  823. * Switch to real mode
  824. */
  825. /* pcxt_ssm_bug */
  826. rsm PSW_SM_I, %r0
  827. load32 PA(1f), %r1
  828. nop
  829. nop
  830. nop
  831. nop
  832. nop
  833. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  834. mtctl %r0, %cr17 /* Clear IIASQ tail */
  835. mtctl %r0, %cr17 /* Clear IIASQ head */
  836. mtctl %r1, %cr18 /* IIAOQ head */
  837. ldo 4(%r1), %r1
  838. mtctl %r1, %cr18 /* IIAOQ tail */
  839. load32 REAL_MODE_PSW, %r1
  840. mtctl %r1, %ipsw
  841. rfi
  842. nop
  843. 1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
  844. cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
  845. cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
  846. b,n srdis_done
  847. srdis_pcxs:
  848. /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
  849. .word 0x141c1a00 /* mfdiag %dr0, %r28 */
  850. .word 0x141c1a00 /* must issue twice */
  851. depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
  852. depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
  853. .word 0x141c1600 /* mtdiag %r28, %dr0 */
  854. .word 0x141c1600 /* must issue twice */
  855. b,n srdis_done
  856. srdis_pcxl:
  857. /* Disable Space Register Hashing for PCXL */
  858. .word 0x141c0600 /* mfdiag %dr0, %r28 */
  859. depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
  860. .word 0x141c0240 /* mtdiag %r28, %dr0 */
  861. b,n srdis_done
  862. srdis_pa20:
  863. /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
  864. .word 0x144008bc /* mfdiag %dr2, %r28 */
  865. depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
  866. .word 0x145c1840 /* mtdiag %r28, %dr2 */
  867. srdis_done:
  868. /* Switch back to virtual mode */
  869. rsm PSW_SM_I, %r0 /* prep to load iia queue */
  870. load32 2f, %r1
  871. nop
  872. nop
  873. nop
  874. nop
  875. nop
  876. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  877. mtctl %r0, %cr17 /* Clear IIASQ tail */
  878. mtctl %r0, %cr17 /* Clear IIASQ head */
  879. mtctl %r1, %cr18 /* IIAOQ head */
  880. ldo 4(%r1), %r1
  881. mtctl %r1, %cr18 /* IIAOQ tail */
  882. load32 KERNEL_PSW, %r1
  883. mtctl %r1, %ipsw
  884. rfi
  885. nop
  886. 2: bv %r0(%r2)
  887. nop
  888. .exit
  889. .procend
  890. ENDPROC(disable_sr_hashing_asm)
  891. .end