  1. /* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
  2. * ultra.S: Don't expand these all over the place...
  3. *
  4. * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
  5. */
  6. #include <linux/config.h>
  7. #include <asm/asi.h>
  8. #include <asm/pgtable.h>
  9. #include <asm/page.h>
  10. #include <asm/spitfire.h>
  11. #include <asm/mmu_context.h>
  12. #include <asm/pil.h>
  13. #include <asm/head.h>
  14. #include <asm/thread_info.h>
  15. #include <asm/cacheflush.h>
  16. /* Basically, most of the Spitfire vs. Cheetah madness
  17. * has to do with the fact that Cheetah does not support
  18. * IMMU flushes out of the secondary context. Someone needs
  19. * to throw a south lake birthday party for the folks
  20. * in Microelectronics who refused to fix this shit.
  21. */
/* This file is meant to be read efficiently by the CPU, not humans.
 * Try hard not to break this for anybody...
 */
  25. .text
  26. .align 32
  27. .globl __flush_tlb_mm
  28. __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
  29. ldxa [%o1] ASI_DMMU, %g2
  30. cmp %g2, %o0
  31. bne,pn %icc, __spitfire_flush_tlb_mm_slow
  32. mov 0x50, %g3
  33. stxa %g0, [%g3] ASI_DMMU_DEMAP
  34. stxa %g0, [%g3] ASI_IMMU_DEMAP
  35. retl
  36. flush %g6
  37. nop
  38. nop
  39. nop
  40. nop
  41. nop
  42. nop
  43. nop
  44. nop
  45. .align 32
  46. .globl __flush_tlb_pending
  47. __flush_tlb_pending:
  48. /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  49. rdpr %pstate, %g7
  50. sllx %o1, 3, %o1
  51. andn %g7, PSTATE_IE, %g2
  52. wrpr %g2, %pstate
  53. mov SECONDARY_CONTEXT, %o4
  54. ldxa [%o4] ASI_DMMU, %g2
  55. stxa %o0, [%o4] ASI_DMMU
  56. 1: sub %o1, (1 << 3), %o1
  57. ldx [%o2 + %o1], %o3
  58. andcc %o3, 1, %g0
  59. andn %o3, 1, %o3
  60. be,pn %icc, 2f
  61. or %o3, 0x10, %o3
  62. stxa %g0, [%o3] ASI_IMMU_DEMAP
  63. 2: stxa %g0, [%o3] ASI_DMMU_DEMAP
  64. membar #Sync
  65. brnz,pt %o1, 1b
  66. nop
  67. stxa %g2, [%o4] ASI_DMMU
  68. flush %g6
  69. retl
  70. wrpr %g7, 0x0, %pstate
  71. .align 32
  72. .globl __flush_tlb_kernel_range
  73. __flush_tlb_kernel_range: /* %o0=start, %o1=end */
  74. cmp %o0, %o1
  75. be,pn %xcc, 2f
  76. sethi %hi(PAGE_SIZE), %o4
  77. sub %o1, %o0, %o3
  78. sub %o3, %o4, %o3
  79. or %o0, 0x20, %o0 ! Nucleus
  80. 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
  81. stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
  82. membar #Sync
  83. brnz,pt %o3, 1b
  84. sub %o3, %o4, %o3
  85. 2: retl
  86. flush %g6
  87. __spitfire_flush_tlb_mm_slow:
  88. rdpr %pstate, %g1
  89. wrpr %g1, PSTATE_IE, %pstate
  90. stxa %o0, [%o1] ASI_DMMU
  91. stxa %g0, [%g3] ASI_DMMU_DEMAP
  92. stxa %g0, [%g3] ASI_IMMU_DEMAP
  93. flush %g6
  94. stxa %g2, [%o1] ASI_DMMU
  95. flush %g6
  96. retl
  97. wrpr %g1, 0, %pstate
  98. /*
  99. * The following code flushes one page_size worth.
  100. */
  101. #if (PAGE_SHIFT == 13)
  102. #define ITAG_MASK 0xfe
  103. #elif (PAGE_SHIFT == 16)
  104. #define ITAG_MASK 0x7fe
  105. #else
  106. #error unsupported PAGE_SIZE
  107. #endif
  108. .align 32
  109. .globl __flush_icache_page
  110. __flush_icache_page: /* %o0 = phys_page */
  111. membar #StoreStore
  112. srlx %o0, PAGE_SHIFT, %o0
  113. sethi %uhi(PAGE_OFFSET), %g1
  114. sllx %o0, PAGE_SHIFT, %o0
  115. sethi %hi(PAGE_SIZE), %g2
  116. sllx %g1, 32, %g1
  117. add %o0, %g1, %o0
  118. 1: subcc %g2, 32, %g2
  119. bne,pt %icc, 1b
  120. flush %o0 + %g2
  121. retl
  122. nop
  123. #ifdef DCACHE_ALIASING_POSSIBLE
  124. #if (PAGE_SHIFT != 13)
  125. #error only page shift of 13 is supported by dcache flush
  126. #endif
  127. #define DTAG_MASK 0x3
  128. .align 64
  129. .globl __flush_dcache_page
  130. __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
  131. sethi %uhi(PAGE_OFFSET), %g1
  132. sllx %g1, 32, %g1
  133. sub %o0, %g1, %o0
  134. clr %o4
  135. srlx %o0, 11, %o0
  136. sethi %hi(1 << 14), %o2
  137. 1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group
  138. add %o4, (1 << 5), %o4 ! IEU0
  139. ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group
  140. add %o4, (1 << 5), %o4 ! IEU0
  141. ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available
  142. add %o4, (1 << 5), %o4 ! IEU0
  143. andn %o3, DTAG_MASK, %o3 ! IEU1
  144. ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group
  145. add %o4, (1 << 5), %o4 ! IEU0
  146. andn %g1, DTAG_MASK, %g1 ! IEU1
  147. cmp %o0, %o3 ! IEU1 Group
  148. be,a,pn %xcc, dflush1 ! CTI
  149. sub %o4, (4 << 5), %o4 ! IEU0 (Group)
  150. cmp %o0, %g1 ! IEU1 Group
  151. andn %g2, DTAG_MASK, %g2 ! IEU0
  152. be,a,pn %xcc, dflush2 ! CTI
  153. sub %o4, (3 << 5), %o4 ! IEU0 (Group)
  154. cmp %o0, %g2 ! IEU1 Group
  155. andn %g3, DTAG_MASK, %g3 ! IEU0
  156. be,a,pn %xcc, dflush3 ! CTI
  157. sub %o4, (2 << 5), %o4 ! IEU0 (Group)
  158. cmp %o0, %g3 ! IEU1 Group
  159. be,a,pn %xcc, dflush4 ! CTI
  160. sub %o4, (1 << 5), %o4 ! IEU0
  161. 2: cmp %o4, %o2 ! IEU1 Group
  162. bne,pt %xcc, 1b ! CTI
  163. nop ! IEU0
  164. /* The I-cache does not snoop local stores so we
  165. * better flush that too when necessary.
  166. */
  167. brnz,pt %o1, __flush_icache_page
  168. sllx %o0, 11, %o0
  169. retl
  170. nop
  171. dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
  172. add %o4, (1 << 5), %o4
  173. dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
  174. add %o4, (1 << 5), %o4
  175. dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
  176. add %o4, (1 << 5), %o4
  177. dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
  178. add %o4, (1 << 5), %o4
  179. membar #Sync
  180. ba,pt %xcc, 2b
  181. nop
  182. #endif /* DCACHE_ALIASING_POSSIBLE */
  183. .align 32
  184. __prefill_dtlb:
  185. rdpr %pstate, %g7
  186. wrpr %g7, PSTATE_IE, %pstate
  187. mov TLB_TAG_ACCESS, %g1
  188. stxa %o5, [%g1] ASI_DMMU
  189. stxa %o2, [%g0] ASI_DTLB_DATA_IN
  190. flush %g6
  191. retl
  192. wrpr %g7, %pstate
  193. __prefill_itlb:
  194. rdpr %pstate, %g7
  195. wrpr %g7, PSTATE_IE, %pstate
  196. mov TLB_TAG_ACCESS, %g1
  197. stxa %o5, [%g1] ASI_IMMU
  198. stxa %o2, [%g0] ASI_ITLB_DATA_IN
  199. flush %g6
  200. retl
  201. wrpr %g7, %pstate
  202. .globl __update_mmu_cache
  203. __update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
  204. srlx %o1, PAGE_SHIFT, %o1
  205. andcc %o3, FAULT_CODE_DTLB, %g0
  206. sllx %o1, PAGE_SHIFT, %o5
  207. bne,pt %xcc, __prefill_dtlb
  208. or %o5, %o0, %o5
  209. ba,a,pt %xcc, __prefill_itlb
  210. /* Cheetah specific versions, patched at boot time.
  211. *
  212. * This writes of the PRIMARY_CONTEXT register in this file are
  213. * safe even on Cheetah+ and later wrt. the page size fields.
  214. * The nucleus page size fields do not matter because we make
  215. * no data references, and these instructions execute out of a
  216. * locked I-TLB entry sitting in the fully assosciative I-TLB.
  217. * This sequence should also never trap.
  218. */
  219. __cheetah_flush_tlb_mm: /* 15 insns */
  220. rdpr %pstate, %g7
  221. andn %g7, PSTATE_IE, %g2
  222. wrpr %g2, 0x0, %pstate
  223. wrpr %g0, 1, %tl
  224. mov PRIMARY_CONTEXT, %o2
  225. mov 0x40, %g3
  226. ldxa [%o2] ASI_DMMU, %g2
  227. stxa %o0, [%o2] ASI_DMMU
  228. stxa %g0, [%g3] ASI_DMMU_DEMAP
  229. stxa %g0, [%g3] ASI_IMMU_DEMAP
  230. stxa %g2, [%o2] ASI_DMMU
  231. flush %g6
  232. wrpr %g0, 0, %tl
  233. retl
  234. wrpr %g7, 0x0, %pstate
  235. __cheetah_flush_tlb_pending: /* 22 insns */
  236. /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  237. rdpr %pstate, %g7
  238. sllx %o1, 3, %o1
  239. andn %g7, PSTATE_IE, %g2
  240. wrpr %g2, 0x0, %pstate
  241. wrpr %g0, 1, %tl
  242. mov PRIMARY_CONTEXT, %o4
  243. ldxa [%o4] ASI_DMMU, %g2
  244. stxa %o0, [%o4] ASI_DMMU
  245. 1: sub %o1, (1 << 3), %o1
  246. ldx [%o2 + %o1], %o3
  247. andcc %o3, 1, %g0
  248. be,pn %icc, 2f
  249. andn %o3, 1, %o3
  250. stxa %g0, [%o3] ASI_IMMU_DEMAP
  251. 2: stxa %g0, [%o3] ASI_DMMU_DEMAP
  252. brnz,pt %o1, 1b
  253. membar #Sync
  254. stxa %g2, [%o4] ASI_DMMU
  255. flush %g6
  256. wrpr %g0, 0, %tl
  257. retl
  258. wrpr %g7, 0x0, %pstate
  259. #ifdef DCACHE_ALIASING_POSSIBLE
  260. flush_dcpage_cheetah: /* 11 insns */
  261. sethi %uhi(PAGE_OFFSET), %g1
  262. sllx %g1, 32, %g1
  263. sub %o0, %g1, %o0
  264. sethi %hi(PAGE_SIZE), %o4
  265. 1: subcc %o4, (1 << 5), %o4
  266. stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
  267. membar #Sync
  268. bne,pt %icc, 1b
  269. nop
  270. retl /* I-cache flush never needed on Cheetah, see callers. */
  271. nop
  272. #endif /* DCACHE_ALIASING_POSSIBLE */
  273. cheetah_patch_one:
  274. 1: lduw [%o1], %g1
  275. stw %g1, [%o0]
  276. flush %o0
  277. subcc %o2, 1, %o2
  278. add %o1, 4, %o1
  279. bne,pt %icc, 1b
  280. add %o0, 4, %o0
  281. retl
  282. nop
  283. .globl cheetah_patch_cachetlbops
  284. cheetah_patch_cachetlbops:
  285. save %sp, -128, %sp
  286. sethi %hi(__flush_tlb_mm), %o0
  287. or %o0, %lo(__flush_tlb_mm), %o0
  288. sethi %hi(__cheetah_flush_tlb_mm), %o1
  289. or %o1, %lo(__cheetah_flush_tlb_mm), %o1
  290. call cheetah_patch_one
  291. mov 15, %o2
  292. sethi %hi(__flush_tlb_pending), %o0
  293. or %o0, %lo(__flush_tlb_pending), %o0
  294. sethi %hi(__cheetah_flush_tlb_pending), %o1
  295. or %o1, %lo(__cheetah_flush_tlb_pending), %o1
  296. call cheetah_patch_one
  297. mov 22, %o2
  298. #ifdef DCACHE_ALIASING_POSSIBLE
  299. sethi %hi(__flush_dcache_page), %o0
  300. or %o0, %lo(__flush_dcache_page), %o0
  301. sethi %hi(flush_dcpage_cheetah), %o1
  302. or %o1, %lo(flush_dcpage_cheetah), %o1
  303. call cheetah_patch_one
  304. mov 11, %o2
  305. #endif /* DCACHE_ALIASING_POSSIBLE */
  306. ret
  307. restore
  308. #ifdef CONFIG_SMP
  309. /* These are all called by the slaves of a cross call, at
  310. * trap level 1, with interrupts fully disabled.
  311. *
  312. * Register usage:
  313. * %g5 mm->context (all tlb flushes)
  314. * %g1 address arg 1 (tlb page and range flushes)
  315. * %g7 address arg 2 (tlb range flush only)
  316. *
  317. * %g6 ivector table, don't touch
  318. * %g2 scratch 1
  319. * %g3 scratch 2
  320. * %g4 scratch 3
  321. *
  322. * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
  323. */
  324. .align 32
  325. .globl xcall_flush_tlb_mm
  326. xcall_flush_tlb_mm:
  327. mov PRIMARY_CONTEXT, %g2
  328. mov 0x40, %g4
  329. ldxa [%g2] ASI_DMMU, %g3
  330. stxa %g5, [%g2] ASI_DMMU
  331. stxa %g0, [%g4] ASI_DMMU_DEMAP
  332. stxa %g0, [%g4] ASI_IMMU_DEMAP
  333. stxa %g3, [%g2] ASI_DMMU
  334. retry
  335. .globl xcall_flush_tlb_pending
  336. xcall_flush_tlb_pending:
  337. /* %g5=context, %g1=nr, %g7=vaddrs[] */
  338. sllx %g1, 3, %g1
  339. mov PRIMARY_CONTEXT, %g4
  340. ldxa [%g4] ASI_DMMU, %g2
  341. stxa %g5, [%g4] ASI_DMMU
  342. 1: sub %g1, (1 << 3), %g1
  343. ldx [%g7 + %g1], %g5
  344. andcc %g5, 0x1, %g0
  345. be,pn %icc, 2f
  346. andn %g5, 0x1, %g5
  347. stxa %g0, [%g5] ASI_IMMU_DEMAP
  348. 2: stxa %g0, [%g5] ASI_DMMU_DEMAP
  349. membar #Sync
  350. brnz,pt %g1, 1b
  351. nop
  352. stxa %g2, [%g4] ASI_DMMU
  353. retry
  354. .globl xcall_flush_tlb_kernel_range
  355. xcall_flush_tlb_kernel_range:
  356. sethi %hi(PAGE_SIZE - 1), %g2
  357. or %g2, %lo(PAGE_SIZE - 1), %g2
  358. andn %g1, %g2, %g1
  359. andn %g7, %g2, %g7
  360. sub %g7, %g1, %g3
  361. add %g2, 1, %g2
  362. sub %g3, %g2, %g3
  363. or %g1, 0x20, %g1 ! Nucleus
  364. 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
  365. stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
  366. membar #Sync
  367. brnz,pt %g3, 1b
  368. sub %g3, %g2, %g3
  369. retry
  370. nop
  371. nop
  372. /* This runs in a very controlled environment, so we do
  373. * not need to worry about BH races etc.
  374. */
  375. .globl xcall_sync_tick
  376. xcall_sync_tick:
  377. rdpr %pstate, %g2
  378. wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
  379. rdpr %pil, %g2
  380. wrpr %g0, 15, %pil
  381. sethi %hi(109f), %g7
  382. b,pt %xcc, etrap_irq
  383. 109: or %g7, %lo(109b), %g7
  384. call smp_synchronize_tick_client
  385. nop
  386. clr %l6
  387. b rtrap_xcall
  388. ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
  389. /* NOTE: This is SPECIAL!! We do etrap/rtrap however
  390. * we choose to deal with the "BH's run with
  391. * %pil==15" problem (described in asm/pil.h)
  392. * by just invoking rtrap directly past where
  393. * BH's are checked for.
  394. *
  395. * We do it like this because we do not want %pil==15
  396. * lockups to prevent regs being reported.
  397. */
  398. .globl xcall_report_regs
  399. xcall_report_regs:
  400. rdpr %pstate, %g2
  401. wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
  402. rdpr %pil, %g2
  403. wrpr %g0, 15, %pil
  404. sethi %hi(109f), %g7
  405. b,pt %xcc, etrap_irq
  406. 109: or %g7, %lo(109b), %g7
  407. call __show_regs
  408. add %sp, PTREGS_OFF, %o0
  409. clr %l6
  410. /* Has to be a non-v9 branch due to the large distance. */
  411. b rtrap_xcall
  412. ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
  413. #ifdef DCACHE_ALIASING_POSSIBLE
  414. .align 32
  415. .globl xcall_flush_dcache_page_cheetah
  416. xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
  417. sethi %hi(PAGE_SIZE), %g3
  418. 1: subcc %g3, (1 << 5), %g3
  419. stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
  420. membar #Sync
  421. bne,pt %icc, 1b
  422. nop
  423. retry
  424. nop
  425. #endif /* DCACHE_ALIASING_POSSIBLE */
  426. .globl xcall_flush_dcache_page_spitfire
  427. xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
  428. %g7 == kernel page virtual address
  429. %g5 == (page->mapping != NULL) */
  430. #ifdef DCACHE_ALIASING_POSSIBLE
  431. srlx %g1, (13 - 2), %g1 ! Form tag comparitor
  432. sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
  433. sub %g3, (1 << 5), %g3 ! D$ linesize == 32
  434. 1: ldxa [%g3] ASI_DCACHE_TAG, %g2
  435. andcc %g2, 0x3, %g0
  436. be,pn %xcc, 2f
  437. andn %g2, 0x3, %g2
  438. cmp %g2, %g1
  439. bne,pt %xcc, 2f
  440. nop
  441. stxa %g0, [%g3] ASI_DCACHE_TAG
  442. membar #Sync
  443. 2: cmp %g3, 0
  444. bne,pt %xcc, 1b
  445. sub %g3, (1 << 5), %g3
  446. brz,pn %g5, 2f
  447. #endif /* DCACHE_ALIASING_POSSIBLE */
  448. sethi %hi(PAGE_SIZE), %g3
  449. 1: flush %g7
  450. subcc %g3, (1 << 5), %g3
  451. bne,pt %icc, 1b
  452. add %g7, (1 << 5), %g7
  453. 2: retry
  454. nop
  455. nop
  456. .globl xcall_promstop
  457. xcall_promstop:
  458. rdpr %pstate, %g2
  459. wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
  460. rdpr %pil, %g2
  461. wrpr %g0, 15, %pil
  462. sethi %hi(109f), %g7
  463. b,pt %xcc, etrap_irq
  464. 109: or %g7, %lo(109b), %g7
  465. flushw
  466. call prom_stopself
  467. nop
  468. /* We should not return, just spin if we do... */
  469. 1: b,a,pt %xcc, 1b
  470. nop
  471. .data
  472. errata32_hwbug:
  473. .xword 0
  474. .text
  475. /* These two are not performance critical... */
  476. .globl xcall_flush_tlb_all_spitfire
  477. xcall_flush_tlb_all_spitfire:
  478. /* Spitfire Errata #32 workaround. */
  479. sethi %hi(errata32_hwbug), %g4
  480. stx %g0, [%g4 + %lo(errata32_hwbug)]
  481. clr %g2
  482. clr %g3
  483. 1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4
  484. and %g4, _PAGE_L, %g5
  485. brnz,pn %g5, 2f
  486. mov TLB_TAG_ACCESS, %g7
  487. stxa %g0, [%g7] ASI_DMMU
  488. membar #Sync
  489. stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS
  490. membar #Sync
  491. /* Spitfire Errata #32 workaround. */
  492. sethi %hi(errata32_hwbug), %g4
  493. stx %g0, [%g4 + %lo(errata32_hwbug)]
  494. 2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4
  495. and %g4, _PAGE_L, %g5
  496. brnz,pn %g5, 2f
  497. mov TLB_TAG_ACCESS, %g7
  498. stxa %g0, [%g7] ASI_IMMU
  499. membar #Sync
  500. stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS
  501. membar #Sync
  502. /* Spitfire Errata #32 workaround. */
  503. sethi %hi(errata32_hwbug), %g4
  504. stx %g0, [%g4 + %lo(errata32_hwbug)]
  505. 2: add %g2, 1, %g2
  506. cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
  507. ble,pt %icc, 1b
  508. sll %g2, 3, %g3
  509. flush %g6
  510. retry
  511. .globl xcall_flush_tlb_all_cheetah
  512. xcall_flush_tlb_all_cheetah:
  513. mov 0x80, %g2
  514. stxa %g0, [%g2] ASI_DMMU_DEMAP
  515. stxa %g0, [%g2] ASI_IMMU_DEMAP
  516. retry
  517. /* These just get rescheduled to PIL vectors. */
  518. .globl xcall_call_function
  519. xcall_call_function:
  520. wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
  521. retry
  522. .globl xcall_receive_signal
  523. xcall_receive_signal:
  524. wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
  525. retry
  526. .globl xcall_capture
  527. xcall_capture:
  528. wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
  529. retry
  530. #endif /* CONFIG_SMP */