/* ultra.S */
/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
/* Basically, most of the Spitfire vs. Cheetah madness
 * has to do with the fact that Cheetah does not support
 * IMMU flushes out of the secondary context. Someone needs
 * to throw a south lake birthday party for the folks
 * in Microelectronics who refused to fix this shit.
 */
/* This file is meant to be read efficiently by the CPU, not humans.
 * (Translated from Polish: "Try not to screw this up for anyone...")
 */
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Spitfire version; on Cheetah this is overwritten at boot by
	 * __cheetah_flush_tlb_mm (see cheetah_patch_cachetlbops).
	 * Fast path: if the secondary context register already holds
	 * the target context, demap it directly.  Otherwise fall
	 * through to the slow path, which swaps contexts with
	 * interrupts disabled.
	 */
	ldxa	[%o1] ASI_DMMU, %g2		! current secondary context
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	 mov	0x50, %g3			! demap-context op, secondary ctx
	stxa	%g0, [%g3] ASI_DMMU_DEMAP	! flush D-TLB for this context
	stxa	%g0, [%g3] ASI_IMMU_DEMAP	! flush I-TLB for this context
	sethi	%hi(KERNBASE), %g3
	flush	%g3				! synchronize instruction fetch
	retl
	 nop
	/* Padding so the patched-in Cheetah variant fits. */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Flush a batch of pending page translations for one context.
	 * Each vaddrs[] entry is a page-aligned virtual address; bit 0
	 * set means the I-TLB entry must be demapped as well.
	 * Spitfire version, patched on Cheetah (27-insn slot).
	 */
	rdpr	%pstate, %g7			! save interrupt state
	sllx	%o1, 3, %o1			! nr -> byte offset into vaddrs[]
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate			! disable interrupts
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2		! save current secondary context
	stxa	%o0, [%o4] ASI_DMMU		! install target context
1:	sub	%o1, (1 << 3), %o1		! walk vaddrs[] from the end
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0			! bit 0: also flush I-TLB?
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	 or	%o3, 0x10, %o3			! demap-page op, secondary ctx
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU		! restore previous context
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
	/* Padding for the larger Cheetah variant. */
	nop
	nop
	nop
	nop
	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range: /* %o0=start, %o1=end */
	/* Demap every kernel (nucleus context) page in [start, end).
	 * %o3 counts the byte offset of the last page down to zero,
	 * so the loop runs from the final page back to start.
	 */
	cmp	%o0, %o1
	be,pn	%xcc, 2f			! empty range -> just flush
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3			! range length in bytes
	sub	%o3, %o4, %o3			! offset of last page
	or	%o0, 0x20, %o0 ! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	 sub	%o3, %o4, %o3			! step back one page
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 nop
__spitfire_flush_tlb_mm_slow:
	/* Slow path for __flush_tlb_mm, entered when the secondary
	 * context register does not already hold the target context.
	 * On entry: %o0 = target ctx, %o1 = SECONDARY_CONTEXT,
	 * %g2 = saved previous secondary ctx, %g3 = 0x50 (demap op).
	 */
	rdpr	%pstate, %g1			! save interrupt state
	wrpr	%g1, PSTATE_IE, %pstate		! toggle IE off
	stxa	%o0, [%o1] ASI_DMMU		! install target context
	stxa	%g0, [%g3] ASI_DMMU_DEMAP	! demap context from D-TLB
	stxa	%g0, [%g3] ASI_IMMU_DEMAP	! demap context from I-TLB
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU		! restore previous context
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	 wrpr	%g1, 0, %pstate			! restore interrupt state
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page: /* %o0 = phys_page */
	/* Flush one page of the I-cache.  The physical address is
	 * page-aligned and converted to its PAGE_OFFSET linear-mapping
	 * virtual alias, then `flush` is issued for every 32-byte
	 * chunk of the page.
	 */
	membar	#StoreStore			! order prior stores first
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0		! page-align the address
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0			! virtual alias of the page
1:	subcc	%g2, 32, %g2			! one 32-byte chunk per pass
	bne,pt	%icc, 1b
	 flush	%o0 + %g2
	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE
#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif
#define DTAG_MASK 0x3
	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 * Walks every D-cache tag (16K cache, 32-byte lines) and
	 * invalidates lines whose tag matches the given page.
	 * Overwritten on Cheetah by __cheetah_flush_dcache_page.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	 andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	 nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b				! more lines to inspect?
	 sub	%o2, (1 << 5), %o2		! D-cache line size
	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page	! tail call when requested
	 sllx	%o0, 11, %o0			! tag -> physical page address
	retl
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.previous
/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm: /* 19 insns */
	/* Cheetah cannot do IMMU demaps out of the secondary context
	 * (see the comment at the top of this file), so the flush is
	 * done through the primary context register at trap level 1,
	 * preserving the nucleus page-size fields.  Must be exactly
	 * 19 instructions: cheetah_patch_cachetlbops copies that many
	 * words over __flush_tlb_mm.
	 */
	rdpr	%pstate, %g7			! save interrupt state
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate		! disable interrupts
	wrpr	%g0, 1, %tl			! raise to trap level 1
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3			! demap-context op, primary ctx
	ldxa	[%o2] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0 /* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU		! install target context
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU		! restore previous context
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl			! back to trap level 0
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
__cheetah_flush_tlb_pending: /* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Cheetah variant of __flush_tlb_pending: runs at trap level 1
	 * using the primary context register (Cheetah cannot demap the
	 * IMMU from the secondary context), preserving the nucleus
	 * page-size fields.  Must be exactly 27 instructions; copied
	 * over __flush_tlb_pending by cheetah_patch_cachetlbops.
	 */
	rdpr	%pstate, %g7			! save interrupt state
	sllx	%o1, 3, %o1			! nr -> byte offset into vaddrs[]
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate		! disable interrupts
	wrpr	%g0, 1, %tl			! raise to trap level 1
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0 /* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU		! install target context
1:	sub	%o1, (1 << 3), %o1		! walk vaddrs[] from the end
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0			! bit 0: also flush I-TLB?
	be,pn	%icc, 2f
	 andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU		! restore previous context
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl			! back to trap level 0
	retl
	 wrpr	%g7, 0x0, %pstate		! restore interrupt state
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	/* Cheetah variant of __flush_dcache_page: directly invalidate
	 * every 32-byte D-cache line covering the page via
	 * ASI_DCACHE_INVALIDATE instead of scanning tags.  Must be
	 * exactly 11 instructions (see cheetah_patch_cachetlbops).
	 */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! kaddr -> physical address
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4		! one 32-byte line per pass
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
cheetah_patch_one:
	/* Copy %o2 instruction words from %o1 (replacement routine)
	 * to %o0 (routine being patched), flushing the I-cache for
	 * each word stored so the new code is fetched coherently.
	 */
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0				! make the store visible to I-fetch
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	 add	%o0, 4, %o0
	retl
	 nop
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Boot-time patching: overwrite the Spitfire TLB/D-cache flush
	 * routines with their Cheetah versions.  Each count passed in
	 * %o2 must match the insn-count annotation on the Cheetah
	 * routine being copied.
	 */
	save	%sp, -128, %sp
	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	cheetah_patch_one
	 mov	19, %o2				! __cheetah_flush_tlb_mm: 19 insns
	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	cheetah_patch_one
	 mov	27, %o2				! __cheetah_flush_tlb_pending: 27 insns
#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	cheetah_patch_one
	 mov	11, %o2				! __cheetah_flush_dcache_page: 11 insns
#endif /* DCACHE_ALIASING_POSSIBLE */
	ret
	 restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:
	/* Demap an entire context on this CPU, via the primary
	 * context register, preserving the nucleus page-size fields.
	 */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3		! save current primary ctx
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5 /* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU		! install target context
	mov	0x40, %g4			! demap-context op, primary ctx
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU		! restore previous context
	retry
	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	/* Cross-call slave: flush a batch of page translations on this
	 * CPU.  Bit 0 of each vaddrs[] entry requests an I-TLB demap
	 * in addition to the D-TLB demap.
	 */
	sllx	%g1, 3, %g1			! nr -> byte offset into vaddrs[]
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2		! save current primary ctx
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5			! preserve nucleus page size fields
	mov	PRIMARY_CONTEXT, %g4		! reload; %g4 was clobbered above
	stxa	%g5, [%g4] ASI_DMMU		! install target context
1:	sub	%g1, (1 << 3), %g1		! walk vaddrs[] from the end
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0			! bit 0: also flush I-TLB?
	be,pn	%icc, 2f
	 andn	%g5, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b
	 nop
	stxa	%g2, [%g4] ASI_DMMU		! restore previous context
	retry
	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
	/* Cross-call slave: demap kernel (nucleus) pages in
	 * [%g1, %g7) on this CPU, iterating from the last page
	 * down to the first.
	 */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1			! page-align start
	andn	%g7, %g2, %g7			! page-align end
	sub	%g7, %g1, %g3			! range length in bytes
	add	%g2, 1, %g2			! %g2 = PAGE_SIZE
	sub	%g3, %g2, %g3			! offset of last page
	or	%g1, 0x20, %g1 ! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	 sub	%g3, %g2, %g3			! step back one page
	retry
	nop
	nop
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:
	/* Cross-call slave: build a full trap frame via etrap_irq,
	 * call smp_synchronize_tick_client, then return through
	 * rtrap_xcall.
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil			! mask maskable interrupts
	sethi	%hi(109f), %g7			! return point for etrap_irq
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
	call	smp_synchronize_tick_client
	 nop
	clr	%l6
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	/* NOTE: This is SPECIAL!! We do etrap/rtrap however
	 * we choose to deal with the "BH's run with
	 * %pil==15" problem (described in asm/pil.h)
	 * by just invoking rtrap directly past where
	 * BH's are checked for.
	 *
	 * We do it like this because we do not want %pil==15
	 * lockups to prevent regs being reported.
	 */
	.globl	xcall_report_regs
xcall_report_regs:
	/* Cross-call slave: build a trap frame and dump this CPU's
	 * registers via __show_regs(regs).
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil			! mask maskable interrupts
	sethi	%hi(109f), %g7			! return point for etrap_irq
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
	call	__show_regs
	 add	%sp, PTREGS_OFF, %o0		! arg: pt_regs pointer
	clr	%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate every 32-byte D-cache line
	 * covering the page, using direct displacement invalidation.
	 */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3		! one 32-byte line per pass
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL) */
	/* Cross-call slave: scan every Spitfire D-cache tag and
	 * invalidate lines matching the page, then (when the page has
	 * a mapping) flush the page out of the I-cache as well.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1		! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3		! D$ size == 16K
	sub	%g3, (1 << 5), %g3		! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0			! line valid?
	be,pn	%xcc, 2f			! no, skip
	 andn	%g2, 0x3, %g2			! clear valid bits
	cmp	%g2, %g1			! TAG match?
	bne,pt	%xcc, 2f			! no, skip
	 nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG	! invalidate matching line
	membar	#Sync
2:	cmp	%g3, 0				! more lines?
	bne,pt	%xcc, 1b
	 sub	%g3, (1 << 5), %g3
	brz,pn	%g5, 2f				! no mapping -> skip I-cache
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3
1:	flush	%g7				! flush I-cache, 32B per pass
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	 add	%g7, (1 << 5), %g7
2:	retry
	nop
	nop
	/* These just get rescheduled to PIL vectors. */
	/* Each stub raises the corresponding software interrupt on
	 * this CPU and resumes; the real work happens in the PIL
	 * interrupt handler.
	 */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry
	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry
	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry
#endif /* CONFIG_SMP */