/* arch/parisc/kernel/pacache.S */
/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *       should only use index and base registers that are not shadowed,
 *       so that the fast path emulation in the non-access miss handler
 *       can be used.
 */
  27. #ifdef CONFIG_64BIT
  28. .level 2.0w
  29. #else
  30. .level 2.0
  31. #endif
  32. #include <asm/psw.h>
  33. #include <asm/assembly.h>
  34. #include <asm/pgtable.h>
  35. #include <asm/cache.h>
  36. #include <linux/linkage.h>
  37. .text
  38. .align 128
  39. ENTRY(flush_tlb_all_local)
  40. .proc
  41. .callinfo NO_CALLS
  42. .entry
  43. /*
  44. * The pitlbe and pdtlbe instructions should only be used to
  45. * flush the entire tlb. Also, there needs to be no intervening
  46. * tlb operations, e.g. tlb misses, so the operation needs
  47. * to happen in real mode with all interruptions disabled.
  48. */
  49. /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
  50. rsm PSW_SM_I, %r19 /* save I-bit state */
  51. load32 PA(1f), %r1
  52. nop
  53. nop
  54. nop
  55. nop
  56. nop
  57. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  58. mtctl %r0, %cr17 /* Clear IIASQ tail */
  59. mtctl %r0, %cr17 /* Clear IIASQ head */
  60. mtctl %r1, %cr18 /* IIAOQ head */
  61. ldo 4(%r1), %r1
  62. mtctl %r1, %cr18 /* IIAOQ tail */
  63. load32 REAL_MODE_PSW, %r1
  64. mtctl %r1, %ipsw
  65. rfi
  66. nop
  67. 1: load32 PA(cache_info), %r1
  68. /* Flush Instruction Tlb */
  69. LDREG ITLB_SID_BASE(%r1), %r20
  70. LDREG ITLB_SID_STRIDE(%r1), %r21
  71. LDREG ITLB_SID_COUNT(%r1), %r22
  72. LDREG ITLB_OFF_BASE(%r1), %arg0
  73. LDREG ITLB_OFF_STRIDE(%r1), %arg1
  74. LDREG ITLB_OFF_COUNT(%r1), %arg2
  75. LDREG ITLB_LOOP(%r1), %arg3
  76. addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
  77. movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
  78. copy %arg0, %r28 /* Init base addr */
  79. fitmanyloop: /* Loop if LOOP >= 2 */
  80. mtsp %r20, %sr1
  81. add %r21, %r20, %r20 /* increment space */
  82. copy %arg2, %r29 /* Init middle loop count */
  83. fitmanymiddle: /* Loop if LOOP >= 2 */
  84. addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
  85. pitlbe 0(%sr1, %r28)
  86. pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
  87. addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
  88. copy %arg3, %r31 /* Re-init inner loop count */
  89. movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
  90. addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
  91. fitoneloop: /* Loop if LOOP = 1 */
  92. mtsp %r20, %sr1
  93. copy %arg0, %r28 /* init base addr */
  94. copy %arg2, %r29 /* init middle loop count */
  95. fitonemiddle: /* Loop if LOOP = 1 */
  96. addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
  97. pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
  98. addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
  99. add %r21, %r20, %r20 /* increment space */
  100. fitdone:
  101. /* Flush Data Tlb */
  102. LDREG DTLB_SID_BASE(%r1), %r20
  103. LDREG DTLB_SID_STRIDE(%r1), %r21
  104. LDREG DTLB_SID_COUNT(%r1), %r22
  105. LDREG DTLB_OFF_BASE(%r1), %arg0
  106. LDREG DTLB_OFF_STRIDE(%r1), %arg1
  107. LDREG DTLB_OFF_COUNT(%r1), %arg2
  108. LDREG DTLB_LOOP(%r1), %arg3
  109. addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
  110. movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
  111. copy %arg0, %r28 /* Init base addr */
  112. fdtmanyloop: /* Loop if LOOP >= 2 */
  113. mtsp %r20, %sr1
  114. add %r21, %r20, %r20 /* increment space */
  115. copy %arg2, %r29 /* Init middle loop count */
  116. fdtmanymiddle: /* Loop if LOOP >= 2 */
  117. addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
  118. pdtlbe 0(%sr1, %r28)
  119. pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
  120. addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
  121. copy %arg3, %r31 /* Re-init inner loop count */
  122. movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
  123. addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
  124. fdtoneloop: /* Loop if LOOP = 1 */
  125. mtsp %r20, %sr1
  126. copy %arg0, %r28 /* init base addr */
  127. copy %arg2, %r29 /* init middle loop count */
  128. fdtonemiddle: /* Loop if LOOP = 1 */
  129. addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
  130. pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
  131. addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
  132. add %r21, %r20, %r20 /* increment space */
  133. fdtdone:
  134. /*
  135. * Switch back to virtual mode
  136. */
  137. /* pcxt_ssm_bug */
  138. rsm PSW_SM_I, %r0
  139. load32 2f, %r1
  140. nop
  141. nop
  142. nop
  143. nop
  144. nop
  145. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  146. mtctl %r0, %cr17 /* Clear IIASQ tail */
  147. mtctl %r0, %cr17 /* Clear IIASQ head */
  148. mtctl %r1, %cr18 /* IIAOQ head */
  149. ldo 4(%r1), %r1
  150. mtctl %r1, %cr18 /* IIAOQ tail */
  151. load32 KERNEL_PSW, %r1
  152. or %r1, %r19, %r1 /* I-bit to state on entry */
  153. mtctl %r1, %ipsw /* restore I-bit (entire PSW) */
  154. rfi
  155. nop
  156. 2: bv %r0(%r2)
  157. nop
  158. .exit
  159. .procend
  160. ENDPROC(flush_tlb_all_local)
  161. .import cache_info,data
  162. ENTRY(flush_instruction_cache_local)
  163. .proc
  164. .callinfo NO_CALLS
  165. .entry
  166. mtsp %r0, %sr1
  167. load32 cache_info, %r1
  168. /* Flush Instruction Cache */
  169. LDREG ICACHE_BASE(%r1), %arg0
  170. LDREG ICACHE_STRIDE(%r1), %arg1
  171. LDREG ICACHE_COUNT(%r1), %arg2
  172. LDREG ICACHE_LOOP(%r1), %arg3
  173. rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
  174. addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
  175. movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
  176. fimanyloop: /* Loop if LOOP >= 2 */
  177. addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
  178. fice %r0(%sr1, %arg0)
  179. fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
  180. movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
  181. addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
  182. fioneloop: /* Loop if LOOP = 1 */
  183. addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */
  184. fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
  185. fisync:
  186. sync
  187. mtsm %r22 /* restore I-bit */
  188. bv %r0(%r2)
  189. nop
  190. .exit
  191. .procend
  192. ENDPROC(flush_instruction_cache_local)
  193. .import cache_info, data
  194. ENTRY(flush_data_cache_local)
  195. .proc
  196. .callinfo NO_CALLS
  197. .entry
  198. mtsp %r0, %sr1
  199. load32 cache_info, %r1
  200. /* Flush Data Cache */
  201. LDREG DCACHE_BASE(%r1), %arg0
  202. LDREG DCACHE_STRIDE(%r1), %arg1
  203. LDREG DCACHE_COUNT(%r1), %arg2
  204. LDREG DCACHE_LOOP(%r1), %arg3
  205. rsm PSW_SM_I, %r22
  206. addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
  207. movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
  208. fdmanyloop: /* Loop if LOOP >= 2 */
  209. addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
  210. fdce %r0(%sr1, %arg0)
  211. fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
  212. movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
  213. addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
  214. fdoneloop: /* Loop if LOOP = 1 */
  215. addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */
  216. fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
  217. fdsync:
  218. syncdma
  219. sync
  220. mtsm %r22 /* restore I-bit */
  221. bv %r0(%r2)
  222. nop
  223. .exit
  224. .procend
  225. ENDPROC(flush_data_cache_local)
  226. .align 16
  227. ENTRY(copy_user_page_asm)
  228. .proc
  229. .callinfo NO_CALLS
  230. .entry
  231. #ifdef CONFIG_64BIT
  232. /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
  233. * Unroll the loop by hand and arrange insn appropriately.
  234. * GCC probably can do this just as well.
  235. */
  236. ldd 0(%r25), %r19
  237. ldi (PAGE_SIZE / 128), %r1
  238. ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
  239. ldw 128(%r25), %r0 /* prefetch 2 */
  240. 1: ldd 8(%r25), %r20
  241. ldw 192(%r25), %r0 /* prefetch 3 */
  242. ldw 256(%r25), %r0 /* prefetch 4 */
  243. ldd 16(%r25), %r21
  244. ldd 24(%r25), %r22
  245. std %r19, 0(%r26)
  246. std %r20, 8(%r26)
  247. ldd 32(%r25), %r19
  248. ldd 40(%r25), %r20
  249. std %r21, 16(%r26)
  250. std %r22, 24(%r26)
  251. ldd 48(%r25), %r21
  252. ldd 56(%r25), %r22
  253. std %r19, 32(%r26)
  254. std %r20, 40(%r26)
  255. ldd 64(%r25), %r19
  256. ldd 72(%r25), %r20
  257. std %r21, 48(%r26)
  258. std %r22, 56(%r26)
  259. ldd 80(%r25), %r21
  260. ldd 88(%r25), %r22
  261. std %r19, 64(%r26)
  262. std %r20, 72(%r26)
  263. ldd 96(%r25), %r19
  264. ldd 104(%r25), %r20
  265. std %r21, 80(%r26)
  266. std %r22, 88(%r26)
  267. ldd 112(%r25), %r21
  268. ldd 120(%r25), %r22
  269. std %r19, 96(%r26)
  270. std %r20, 104(%r26)
  271. ldo 128(%r25), %r25
  272. std %r21, 112(%r26)
  273. std %r22, 120(%r26)
  274. ldo 128(%r26), %r26
  275. /* conditional branches nullify on forward taken branch, and on
  276. * non-taken backward branch. Note that .+4 is a backwards branch.
  277. * The ldd should only get executed if the branch is taken.
  278. */
  279. addib,COND(>),n -1, %r1, 1b /* bundle 10 */
  280. ldd 0(%r25), %r19 /* start next loads */
  281. #else
  282. /*
  283. * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
  284. * bundles (very restricted rules for bundling).
  285. * Note that until (if) we start saving
  286. * the full 64 bit register values on interrupt, we can't
  287. * use ldd/std on a 32 bit kernel.
  288. */
  289. ldw 0(%r25), %r19
  290. ldi (PAGE_SIZE / 64), %r1
  291. 1:
  292. ldw 4(%r25), %r20
  293. ldw 8(%r25), %r21
  294. ldw 12(%r25), %r22
  295. stw %r19, 0(%r26)
  296. stw %r20, 4(%r26)
  297. stw %r21, 8(%r26)
  298. stw %r22, 12(%r26)
  299. ldw 16(%r25), %r19
  300. ldw 20(%r25), %r20
  301. ldw 24(%r25), %r21
  302. ldw 28(%r25), %r22
  303. stw %r19, 16(%r26)
  304. stw %r20, 20(%r26)
  305. stw %r21, 24(%r26)
  306. stw %r22, 28(%r26)
  307. ldw 32(%r25), %r19
  308. ldw 36(%r25), %r20
  309. ldw 40(%r25), %r21
  310. ldw 44(%r25), %r22
  311. stw %r19, 32(%r26)
  312. stw %r20, 36(%r26)
  313. stw %r21, 40(%r26)
  314. stw %r22, 44(%r26)
  315. ldw 48(%r25), %r19
  316. ldw 52(%r25), %r20
  317. ldw 56(%r25), %r21
  318. ldw 60(%r25), %r22
  319. stw %r19, 48(%r26)
  320. stw %r20, 52(%r26)
  321. ldo 64(%r25), %r25
  322. stw %r21, 56(%r26)
  323. stw %r22, 60(%r26)
  324. ldo 64(%r26), %r26
  325. addib,COND(>),n -1, %r1, 1b
  326. ldw 0(%r25), %r19
  327. #endif
  328. bv %r0(%r2)
  329. nop
  330. .exit
  331. .procend
  332. ENDPROC(copy_user_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *       maximum alias boundary being 4 Mb. We've been assured by the
 *       parisc chip designers that there will not ever be a parisc
 *       chip with a larger alias boundary (Never say never :-) ).
 *
 *       Subtle: the dtlb miss handlers support the temp alias region by
 *       "knowing" that if a dtlb miss happens within the temp alias
 *       region it must have occurred while in clear_user_page. Since
 *       this routine makes use of processor local translations, we
 *       don't want to insert them into the kernel page table. Instead,
 *       we load up some general registers (they need to be registers
 *       which aren't shadowed) with the physical page numbers (preshifted
 *       for tlb insertion) needed to insert the translations. When we
 *       miss on the translation, the dtlb miss handler inserts the
 *       translation into the tlb using these values:
 *
 *       %r26 physical page (shifted for tlb insert) of "to" translation
 *       %r23 physical page (shifted for tlb insert) of "from" translation
 */
  353. #if 0
  354. /*
  355. * We can't do this since copy_user_page is used to bring in
  356. * file data that might have instructions. Since the data would
  357. * then need to be flushed out so the i-fetch can see it, it
  358. * makes more sense to just copy through the kernel translation
  359. * and flush it.
  360. *
  361. * I'm still keeping this around because it may be possible to
  362. * use it if more information is passed into copy_user_page().
  363. * Have to do some measurements to see if it is worthwhile to
  364. * lobby for such a change.
  365. */
  366. ENTRY(copy_user_page_asm)
  367. .proc
  368. .callinfo NO_CALLS
  369. .entry
  370. ldil L%(__PAGE_OFFSET), %r1
  371. sub %r26, %r1, %r26
  372. sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
  373. ldil L%(TMPALIAS_MAP_START), %r28
  374. /* FIXME for different page sizes != 4k */
  375. #ifdef CONFIG_64BIT
  376. extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
  377. extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
  378. depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
  379. depdi 0, 63,12, %r28 /* Clear any offset bits */
  380. copy %r28, %r29
  381. depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
  382. #else
  383. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  384. extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
  385. depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
  386. depwi 0, 31,12, %r28 /* Clear any offset bits */
  387. copy %r28, %r29
  388. depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
  389. #endif
  390. /* Purge any old translations */
  391. pdtlb 0(%r28)
  392. pdtlb 0(%r29)
  393. ldi 64, %r1
  394. /*
  395. * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
  396. * bundles (very restricted rules for bundling). It probably
  397. * does OK on PCXU and better, but we could do better with
  398. * ldd/std instructions. Note that until (if) we start saving
  399. * the full 64 bit register values on interrupt, we can't
  400. * use ldd/std on a 32 bit kernel.
  401. */
  402. 1:
  403. ldw 0(%r29), %r19
  404. ldw 4(%r29), %r20
  405. ldw 8(%r29), %r21
  406. ldw 12(%r29), %r22
  407. stw %r19, 0(%r28)
  408. stw %r20, 4(%r28)
  409. stw %r21, 8(%r28)
  410. stw %r22, 12(%r28)
  411. ldw 16(%r29), %r19
  412. ldw 20(%r29), %r20
  413. ldw 24(%r29), %r21
  414. ldw 28(%r29), %r22
  415. stw %r19, 16(%r28)
  416. stw %r20, 20(%r28)
  417. stw %r21, 24(%r28)
  418. stw %r22, 28(%r28)
  419. ldw 32(%r29), %r19
  420. ldw 36(%r29), %r20
  421. ldw 40(%r29), %r21
  422. ldw 44(%r29), %r22
  423. stw %r19, 32(%r28)
  424. stw %r20, 36(%r28)
  425. stw %r21, 40(%r28)
  426. stw %r22, 44(%r28)
  427. ldw 48(%r29), %r19
  428. ldw 52(%r29), %r20
  429. ldw 56(%r29), %r21
  430. ldw 60(%r29), %r22
  431. stw %r19, 48(%r28)
  432. stw %r20, 52(%r28)
  433. stw %r21, 56(%r28)
  434. stw %r22, 60(%r28)
  435. ldo 64(%r28), %r28
  436. addib,COND(>) -1, %r1,1b
  437. ldo 64(%r29), %r29
  438. bv %r0(%r2)
  439. nop
  440. .exit
  441. .procend
  442. ENDPROC(copy_user_page_asm)
  443. #endif
  444. ENTRY(__clear_user_page_asm)
  445. .proc
  446. .callinfo NO_CALLS
  447. .entry
  448. tophys_r1 %r26
  449. ldil L%(TMPALIAS_MAP_START), %r28
  450. #ifdef CONFIG_64BIT
  451. #if (TMPALIAS_MAP_START >= 0x80000000)
  452. depdi 0, 31,32, %r28 /* clear any sign extension */
  453. /* FIXME: page size dependend */
  454. #endif
  455. extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
  456. depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
  457. depdi 0, 63,12, %r28 /* Clear any offset bits */
  458. #else
  459. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  460. depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
  461. depwi 0, 31,12, %r28 /* Clear any offset bits */
  462. #endif
  463. /* Purge any old translation */
  464. pdtlb 0(%r28)
  465. #ifdef CONFIG_64BIT
  466. ldi (PAGE_SIZE / 128), %r1
  467. /* PREFETCH (Write) has not (yet) been proven to help here */
  468. /* #define PREFETCHW_OP ldd 256(%0), %r0 */
  469. 1: std %r0, 0(%r28)
  470. std %r0, 8(%r28)
  471. std %r0, 16(%r28)
  472. std %r0, 24(%r28)
  473. std %r0, 32(%r28)
  474. std %r0, 40(%r28)
  475. std %r0, 48(%r28)
  476. std %r0, 56(%r28)
  477. std %r0, 64(%r28)
  478. std %r0, 72(%r28)
  479. std %r0, 80(%r28)
  480. std %r0, 88(%r28)
  481. std %r0, 96(%r28)
  482. std %r0, 104(%r28)
  483. std %r0, 112(%r28)
  484. std %r0, 120(%r28)
  485. addib,COND(>) -1, %r1, 1b
  486. ldo 128(%r28), %r28
  487. #else /* ! CONFIG_64BIT */
  488. ldi (PAGE_SIZE / 64), %r1
  489. 1:
  490. stw %r0, 0(%r28)
  491. stw %r0, 4(%r28)
  492. stw %r0, 8(%r28)
  493. stw %r0, 12(%r28)
  494. stw %r0, 16(%r28)
  495. stw %r0, 20(%r28)
  496. stw %r0, 24(%r28)
  497. stw %r0, 28(%r28)
  498. stw %r0, 32(%r28)
  499. stw %r0, 36(%r28)
  500. stw %r0, 40(%r28)
  501. stw %r0, 44(%r28)
  502. stw %r0, 48(%r28)
  503. stw %r0, 52(%r28)
  504. stw %r0, 56(%r28)
  505. stw %r0, 60(%r28)
  506. addib,COND(>) -1, %r1, 1b
  507. ldo 64(%r28), %r28
  508. #endif /* CONFIG_64BIT */
  509. bv %r0(%r2)
  510. nop
  511. .exit
  512. .procend
  513. ENDPROC(__clear_user_page_asm)
  514. ENTRY(flush_kernel_dcache_page_asm)
  515. .proc
  516. .callinfo NO_CALLS
  517. .entry
  518. ldil L%dcache_stride, %r1
  519. ldw R%dcache_stride(%r1), %r23
  520. #ifdef CONFIG_64BIT
  521. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  522. #else
  523. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  524. #endif
  525. add %r26, %r25, %r25
  526. sub %r25, %r23, %r25
  527. 1: fdc,m %r23(%r26)
  528. fdc,m %r23(%r26)
  529. fdc,m %r23(%r26)
  530. fdc,m %r23(%r26)
  531. fdc,m %r23(%r26)
  532. fdc,m %r23(%r26)
  533. fdc,m %r23(%r26)
  534. fdc,m %r23(%r26)
  535. fdc,m %r23(%r26)
  536. fdc,m %r23(%r26)
  537. fdc,m %r23(%r26)
  538. fdc,m %r23(%r26)
  539. fdc,m %r23(%r26)
  540. fdc,m %r23(%r26)
  541. fdc,m %r23(%r26)
  542. cmpb,COND(<<) %r26, %r25,1b
  543. fdc,m %r23(%r26)
  544. sync
  545. bv %r0(%r2)
  546. nop
  547. .exit
  548. .procend
  549. ENDPROC(flush_kernel_dcache_page_asm)
  550. ENTRY(flush_user_dcache_page)
  551. .proc
  552. .callinfo NO_CALLS
  553. .entry
  554. ldil L%dcache_stride, %r1
  555. ldw R%dcache_stride(%r1), %r23
  556. #ifdef CONFIG_64BIT
  557. depdi,z 1,63-PAGE_SHIFT,1, %r25
  558. #else
  559. depwi,z 1,31-PAGE_SHIFT,1, %r25
  560. #endif
  561. add %r26, %r25, %r25
  562. sub %r25, %r23, %r25
  563. 1: fdc,m %r23(%sr3, %r26)
  564. fdc,m %r23(%sr3, %r26)
  565. fdc,m %r23(%sr3, %r26)
  566. fdc,m %r23(%sr3, %r26)
  567. fdc,m %r23(%sr3, %r26)
  568. fdc,m %r23(%sr3, %r26)
  569. fdc,m %r23(%sr3, %r26)
  570. fdc,m %r23(%sr3, %r26)
  571. fdc,m %r23(%sr3, %r26)
  572. fdc,m %r23(%sr3, %r26)
  573. fdc,m %r23(%sr3, %r26)
  574. fdc,m %r23(%sr3, %r26)
  575. fdc,m %r23(%sr3, %r26)
  576. fdc,m %r23(%sr3, %r26)
  577. fdc,m %r23(%sr3, %r26)
  578. cmpb,COND(<<) %r26, %r25,1b
  579. fdc,m %r23(%sr3, %r26)
  580. sync
  581. bv %r0(%r2)
  582. nop
  583. .exit
  584. .procend
  585. ENDPROC(flush_user_dcache_page)
  586. ENTRY(flush_user_icache_page)
  587. .proc
  588. .callinfo NO_CALLS
  589. .entry
  590. ldil L%dcache_stride, %r1
  591. ldw R%dcache_stride(%r1), %r23
  592. #ifdef CONFIG_64BIT
  593. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  594. #else
  595. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  596. #endif
  597. add %r26, %r25, %r25
  598. sub %r25, %r23, %r25
  599. 1: fic,m %r23(%sr3, %r26)
  600. fic,m %r23(%sr3, %r26)
  601. fic,m %r23(%sr3, %r26)
  602. fic,m %r23(%sr3, %r26)
  603. fic,m %r23(%sr3, %r26)
  604. fic,m %r23(%sr3, %r26)
  605. fic,m %r23(%sr3, %r26)
  606. fic,m %r23(%sr3, %r26)
  607. fic,m %r23(%sr3, %r26)
  608. fic,m %r23(%sr3, %r26)
  609. fic,m %r23(%sr3, %r26)
  610. fic,m %r23(%sr3, %r26)
  611. fic,m %r23(%sr3, %r26)
  612. fic,m %r23(%sr3, %r26)
  613. fic,m %r23(%sr3, %r26)
  614. cmpb,COND(<<) %r26, %r25,1b
  615. fic,m %r23(%sr3, %r26)
  616. sync
  617. bv %r0(%r2)
  618. nop
  619. .exit
  620. .procend
  621. ENDPROC(flush_user_icache_page)
  622. ENTRY(purge_kernel_dcache_page)
  623. .proc
  624. .callinfo NO_CALLS
  625. .entry
  626. ldil L%dcache_stride, %r1
  627. ldw R%dcache_stride(%r1), %r23
  628. #ifdef CONFIG_64BIT
  629. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  630. #else
  631. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  632. #endif
  633. add %r26, %r25, %r25
  634. sub %r25, %r23, %r25
  635. 1: pdc,m %r23(%r26)
  636. pdc,m %r23(%r26)
  637. pdc,m %r23(%r26)
  638. pdc,m %r23(%r26)
  639. pdc,m %r23(%r26)
  640. pdc,m %r23(%r26)
  641. pdc,m %r23(%r26)
  642. pdc,m %r23(%r26)
  643. pdc,m %r23(%r26)
  644. pdc,m %r23(%r26)
  645. pdc,m %r23(%r26)
  646. pdc,m %r23(%r26)
  647. pdc,m %r23(%r26)
  648. pdc,m %r23(%r26)
  649. pdc,m %r23(%r26)
  650. cmpb,COND(<<) %r26, %r25, 1b
  651. pdc,m %r23(%r26)
  652. sync
  653. bv %r0(%r2)
  654. nop
  655. .exit
  656. .procend
  657. ENDPROC(purge_kernel_dcache_page)
  658. #if 0
  659. /* Currently not used, but it still is a possible alternate
  660. * solution.
  661. */
  662. ENTRY(flush_alias_page)
  663. .proc
  664. .callinfo NO_CALLS
  665. .entry
  666. tophys_r1 %r26
  667. ldil L%(TMPALIAS_MAP_START), %r28
  668. #ifdef CONFIG_64BIT
  669. extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
  670. depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
  671. depdi 0, 63,12, %r28 /* Clear any offset bits */
  672. #else
  673. extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
  674. depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
  675. depwi 0, 31,12, %r28 /* Clear any offset bits */
  676. #endif
  677. /* Purge any old translation */
  678. pdtlb 0(%r28)
  679. ldil L%dcache_stride, %r1
  680. ldw R%dcache_stride(%r1), %r23
  681. #ifdef CONFIG_64BIT
  682. depdi,z 1, 63-PAGE_SHIFT,1, %r29
  683. #else
  684. depwi,z 1, 31-PAGE_SHIFT,1, %r29
  685. #endif
  686. add %r28, %r29, %r29
  687. sub %r29, %r23, %r29
  688. 1: fdc,m %r23(%r28)
  689. fdc,m %r23(%r28)
  690. fdc,m %r23(%r28)
  691. fdc,m %r23(%r28)
  692. fdc,m %r23(%r28)
  693. fdc,m %r23(%r28)
  694. fdc,m %r23(%r28)
  695. fdc,m %r23(%r28)
  696. fdc,m %r23(%r28)
  697. fdc,m %r23(%r28)
  698. fdc,m %r23(%r28)
  699. fdc,m %r23(%r28)
  700. fdc,m %r23(%r28)
  701. fdc,m %r23(%r28)
  702. fdc,m %r23(%r28)
  703. cmpb,COND(<<) %r28, %r29, 1b
  704. fdc,m %r23(%r28)
  705. sync
  706. bv %r0(%r2)
  707. nop
  708. .exit
  709. .procend
  710. #endif
  711. .export flush_user_dcache_range_asm
  712. flush_user_dcache_range_asm:
  713. .proc
  714. .callinfo NO_CALLS
  715. .entry
  716. ldil L%dcache_stride, %r1
  717. ldw R%dcache_stride(%r1), %r23
  718. ldo -1(%r23), %r21
  719. ANDCM %r26, %r21, %r26
  720. 1: cmpb,COND(<<),n %r26, %r25, 1b
  721. fdc,m %r23(%sr3, %r26)
  722. sync
  723. bv %r0(%r2)
  724. nop
  725. .exit
  726. .procend
  727. ENDPROC(flush_alias_page)
  728. ENTRY(flush_kernel_dcache_range_asm)
  729. .proc
  730. .callinfo NO_CALLS
  731. .entry
  732. ldil L%dcache_stride, %r1
  733. ldw R%dcache_stride(%r1), %r23
  734. ldo -1(%r23), %r21
  735. ANDCM %r26, %r21, %r26
  736. 1: cmpb,COND(<<),n %r26, %r25,1b
  737. fdc,m %r23(%r26)
  738. sync
  739. syncdma
  740. bv %r0(%r2)
  741. nop
  742. .exit
  743. .procend
  744. ENDPROC(flush_kernel_dcache_range_asm)
  745. ENTRY(flush_user_icache_range_asm)
  746. .proc
  747. .callinfo NO_CALLS
  748. .entry
  749. ldil L%icache_stride, %r1
  750. ldw R%icache_stride(%r1), %r23
  751. ldo -1(%r23), %r21
  752. ANDCM %r26, %r21, %r26
  753. 1: cmpb,COND(<<),n %r26, %r25,1b
  754. fic,m %r23(%sr3, %r26)
  755. sync
  756. bv %r0(%r2)
  757. nop
  758. .exit
  759. .procend
  760. ENDPROC(flush_user_icache_range_asm)
  761. ENTRY(flush_kernel_icache_page)
  762. .proc
  763. .callinfo NO_CALLS
  764. .entry
  765. ldil L%icache_stride, %r1
  766. ldw R%icache_stride(%r1), %r23
  767. #ifdef CONFIG_64BIT
  768. depdi,z 1, 63-PAGE_SHIFT,1, %r25
  769. #else
  770. depwi,z 1, 31-PAGE_SHIFT,1, %r25
  771. #endif
  772. add %r26, %r25, %r25
  773. sub %r25, %r23, %r25
  774. 1: fic,m %r23(%sr4, %r26)
  775. fic,m %r23(%sr4, %r26)
  776. fic,m %r23(%sr4, %r26)
  777. fic,m %r23(%sr4, %r26)
  778. fic,m %r23(%sr4, %r26)
  779. fic,m %r23(%sr4, %r26)
  780. fic,m %r23(%sr4, %r26)
  781. fic,m %r23(%sr4, %r26)
  782. fic,m %r23(%sr4, %r26)
  783. fic,m %r23(%sr4, %r26)
  784. fic,m %r23(%sr4, %r26)
  785. fic,m %r23(%sr4, %r26)
  786. fic,m %r23(%sr4, %r26)
  787. fic,m %r23(%sr4, %r26)
  788. fic,m %r23(%sr4, %r26)
  789. cmpb,COND(<<) %r26, %r25, 1b
  790. fic,m %r23(%sr4, %r26)
  791. sync
  792. bv %r0(%r2)
  793. nop
  794. .exit
  795. .procend
  796. ENDPROC(flush_kernel_icache_page)
  797. ENTRY(flush_kernel_icache_range_asm)
  798. .proc
  799. .callinfo NO_CALLS
  800. .entry
  801. ldil L%icache_stride, %r1
  802. ldw R%icache_stride(%r1), %r23
  803. ldo -1(%r23), %r21
  804. ANDCM %r26, %r21, %r26
  805. 1: cmpb,COND(<<),n %r26, %r25, 1b
  806. fic,m %r23(%sr4, %r26)
  807. sync
  808. bv %r0(%r2)
  809. nop
  810. .exit
  811. .procend
  812. ENDPROC(flush_kernel_icache_range_asm)
  813. /* align should cover use of rfi in disable_sr_hashing_asm and
  814. * srdis_done.
  815. */
  816. .align 256
  817. ENTRY(disable_sr_hashing_asm)
  818. .proc
  819. .callinfo NO_CALLS
  820. .entry
  821. /*
  822. * Switch to real mode
  823. */
  824. /* pcxt_ssm_bug */
  825. rsm PSW_SM_I, %r0
  826. load32 PA(1f), %r1
  827. nop
  828. nop
  829. nop
  830. nop
  831. nop
  832. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  833. mtctl %r0, %cr17 /* Clear IIASQ tail */
  834. mtctl %r0, %cr17 /* Clear IIASQ head */
  835. mtctl %r1, %cr18 /* IIAOQ head */
  836. ldo 4(%r1), %r1
  837. mtctl %r1, %cr18 /* IIAOQ tail */
  838. load32 REAL_MODE_PSW, %r1
  839. mtctl %r1, %ipsw
  840. rfi
  841. nop
  842. 1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
  843. cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
  844. cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
  845. b,n srdis_done
  846. srdis_pcxs:
  847. /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
  848. .word 0x141c1a00 /* mfdiag %dr0, %r28 */
  849. .word 0x141c1a00 /* must issue twice */
  850. depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
  851. depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
  852. .word 0x141c1600 /* mtdiag %r28, %dr0 */
  853. .word 0x141c1600 /* must issue twice */
  854. b,n srdis_done
  855. srdis_pcxl:
  856. /* Disable Space Register Hashing for PCXL */
  857. .word 0x141c0600 /* mfdiag %dr0, %r28 */
  858. depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
  859. .word 0x141c0240 /* mtdiag %r28, %dr0 */
  860. b,n srdis_done
  861. srdis_pa20:
  862. /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
  863. .word 0x144008bc /* mfdiag %dr2, %r28 */
  864. depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
  865. .word 0x145c1840 /* mtdiag %r28, %dr2 */
  866. srdis_done:
  867. /* Switch back to virtual mode */
  868. rsm PSW_SM_I, %r0 /* prep to load iia queue */
  869. load32 2f, %r1
  870. nop
  871. nop
  872. nop
  873. nop
  874. nop
  875. rsm PSW_SM_Q, %r0 /* prep to load iia queue */
  876. mtctl %r0, %cr17 /* Clear IIASQ tail */
  877. mtctl %r0, %cr17 /* Clear IIASQ head */
  878. mtctl %r1, %cr18 /* IIAOQ head */
  879. ldo 4(%r1), %r1
  880. mtctl %r1, %cr18 /* IIAOQ tail */
  881. load32 KERNEL_PSW, %r1
  882. mtctl %r1, %ipsw
  883. rfi
  884. nop
  885. 2: bv %r0(%r2)
  886. nop
  887. .exit
  888. .procend
  889. ENDPROC(disable_sr_hashing_asm)
  890. .end