/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try hard not to break this for anybody...  (translated from Polish)
	 */
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Flush all TLB entries belonging to one mm context (Spitfire
	 * version; replaced at boot by __cheetah_flush_tlb_mm on Cheetah,
	 * see cheetah_patch_cachetlbops below).
	 */
	ldxa	[%o1] ASI_DMMU, %g2		! read current secondary context
	cmp	%g2, %o0			! already the active secondary ctx?
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow ! no: must install it first
	mov	0x50, %g3			! delay slot: demap-context op,
						!   secondary context selected
	stxa	%g0, [%g3] ASI_DMMU_DEMAP	! nuke D-TLB context
	stxa	%g0, [%g3] ASI_IMMU_DEMAP	! nuke I-TLB context
	retl
	flush	%g6				! delay slot: synchronize demaps
	/* Padding: leaves room for the 15-insn Cheetah variant to be
	 * patched over this routine at boot.
	 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Demap a batch of pending virtual addresses in the given context.
	 * Bit 0 of each vaddrs[] entry, when set, requests an I-TLB demap
	 * in addition to the D-TLB demap.  Runs with interrupts disabled
	 * while the secondary context register is borrowed.
	 */
	rdpr	%pstate, %g7		! save PSTATE
	sllx	%o1, 3, %o1		! nr * 8 = byte offset past last entry
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate		! disable interrupts
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2	! save current secondary context
	stxa	%o0, [%o4] ASI_DMMU	! install target context as secondary
1:	sub	%o1, (1 << 3), %o1	! step back to previous entry
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0		! bit 0: also flush I-TLB?
	andn	%o3, 1, %o3		! strip flag bit -> virtual address
	be,pn	%icc, 2f		! D-TLB only
	or	%o3, 0x10, %o3		! delay slot: demap-page op,
					!   secondary context selected
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b			! more entries remaining?
	nop
	stxa	%g2, [%o4] ASI_DMMU	! restore original secondary context
	flush	%g6
	retl
	wrpr	%g7, 0x0, %pstate	! delay slot: restore PSTATE (re-enables IRQs)
	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range: /* %o0=start, %o1=end */
	/* Demap every page of [start, end) from the nucleus (kernel)
	 * context, walking the range from the last page down to the first.
	 */
	cmp	%o0, %o1
	be,pn	%xcc, 2f		! empty range: nothing to do
	sethi	%hi(PAGE_SIZE), %o4	! delay slot: %o4 = PAGE_SIZE
	sub	%o1, %o0, %o3		! range length in bytes
	sub	%o3, %o4, %o3		! offset of the final page
	or	%o0, 0x20, %o0 ! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP	! demap-page, D-TLB
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP	! demap-page, I-TLB
	membar	#Sync
	brnz,pt	%o3, 1b			! loop until offset hits zero
	sub	%o3, %o4, %o3		! delay slot: previous page
2:	retl
	flush	%g6			! delay slot: synchronize
__spitfire_flush_tlb_mm_slow:
	/* Slow path for __flush_tlb_mm: the context to flush is not the
	 * currently installed secondary context.  Temporarily install it
	 * (with interrupts disabled), demap both TLBs, then restore the
	 * old context.  On entry from the fast path: %g2 = saved secondary
	 * context, %g3 = 0x50 (demap-context op, secondary).
	 */
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate	! toggle IE off
	stxa	%o0, [%o1] ASI_DMMU	! install target context
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU	! restore original secondary context
	flush	%g6
	retl
	wrpr	%g1, 0, %pstate		! delay slot: restore PSTATE
/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
	.align	32
	.globl	__flush_icache_page
__flush_icache_page: /* %o0 = phys_page */
	/* Flush one page from the I-cache using the flush instruction on
	 * the kernel linear mapping of the page, 32 bytes per iteration.
	 */
	membar	#StoreStore		! order prior stores before the flushes
	srlx	%o0, PAGE_SHIFT, %o0	! round the physical address...
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0	! ...down to a page boundary
	sethi	%hi(PAGE_SIZE), %g2	! %g2 = byte offset, counts down
	sllx	%g1, 32, %g1		! %g1 = PAGE_OFFSET
	add	%o0, %g1, %o0		! physical -> kernel virtual address
1:	subcc	%g2, 32, %g2		! next 32-byte line
	bne,pt	%icc, 1b
	flush	%o0 + %g2		! delay slot: flush this line
	retl
	nop
#ifdef DCACHE_ALIASING_POSSIBLE
#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif
#define DTAG_MASK 0x3

	/* Flush one page from the D-cache by scanning the cache tags and
	 * invalidating lines whose tag matches the page.  The loop reads
	 * four tags per iteration, software-pipelined for Spitfire's
	 * instruction-grouping rules -- the IEU/LSU annotations record the
	 * intended dispatch groups; do not reorder.  Replaced at boot by
	 * flush_dcpage_cheetah on Cheetah.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0		! kernel virtual -> physical
	clr	%o4			! %o4 = current tag offset in D-cache
	srlx	%o0, 11, %o0		! form the tag comparison value
	sethi	%hi(1 << 14), %o2	! D$ size == 16K; loop bound
1:	ldxa	[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	ldxa	[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	ldxa	[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add	%o4, (1 << 5), %o4		! IEU0
	andn	%o3, DTAG_MASK, %o3		! IEU1
	ldxa	[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	andn	%g1, DTAG_MASK, %g1		! IEU1
	cmp	%o0, %o3			! IEU1	Group
	be,a,pn	%xcc, dflush1			! CTI
	sub	%o4, (4 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g1			! IEU1	Group
	andn	%g2, DTAG_MASK, %g2		! IEU0
	be,a,pn	%xcc, dflush2			! CTI
	sub	%o4, (3 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g2			! IEU1	Group
	andn	%g3, DTAG_MASK, %g3		! IEU0
	be,a,pn	%xcc, dflush3			! CTI
	sub	%o4, (2 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g3			! IEU1	Group
	be,a,pn	%xcc, dflush4			! CTI
	sub	%o4, (1 << 5), %o4		! IEU0
2:	cmp	%o4, %o2			! IEU1	Group
	bne,pt	%xcc, 1b			! CTI
	nop					! IEU0

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page	! tail-call if flush_icache set
	sllx	%o0, 11, %o0		! delay slot: rebuild physical address
	retl
	nop

	/* Tag-hit handlers.  %o4 was rewound to the matching line; each
	 * entry point falls through, so a hit at slot N clears that line's
	 * tag plus the remaining tags of its 4-line group, then the scan
	 * resumes at 2b.
	 */
dflush1:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush2:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush3:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush4:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
	membar	#Sync
	ba,pt	%xcc, 2b
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.align	32
__prefill_dtlb:
	/* Preload one D-TLB entry.  %o5 = TLB tag (page vaddr | context),
	 * %o2 = pte.  Interrupts are kept off across the tag-access /
	 * data-in pair so the two writes stay atomic wrt. other TLB traffic.
	 */
	rdpr	%pstate, %g7
	wrpr	%g7, PSTATE_IE, %pstate	! toggle IE off
	mov	TLB_TAG_ACCESS, %g1
	stxa	%o5, [%g1] ASI_DMMU	! set the tag for the new entry
	stxa	%o2, [%g0] ASI_DTLB_DATA_IN	! install the pte
	flush	%g6
	retl
	wrpr	%g7, %pstate		! delay slot: restore PSTATE
__prefill_itlb:
	/* Preload one I-TLB entry; mirror image of __prefill_dtlb.
	 * %o5 = TLB tag (page vaddr | context), %o2 = pte.
	 */
	rdpr	%pstate, %g7
	wrpr	%g7, PSTATE_IE, %pstate	! toggle IE off
	mov	TLB_TAG_ACCESS, %g1
	stxa	%o5, [%g1] ASI_IMMU	! set the tag for the new entry
	stxa	%o2, [%g0] ASI_ITLB_DATA_IN	! install the pte
	flush	%g6
	retl
	wrpr	%g7, %pstate		! delay slot: restore PSTATE
	.globl	__update_mmu_cache
__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
	/* Preload the TLB with a freshly established pte, choosing the
	 * D-TLB or I-TLB according to FAULT_CODE_DTLB in the fault code.
	 */
	srlx	%o1, PAGE_SHIFT, %o1	! page-align the faulting address...
	andcc	%o3, FAULT_CODE_DTLB, %g0
	sllx	%o1, PAGE_SHIFT, %o5	! ...into %o5
	bne,pt	%xcc, __prefill_dtlb	! D-TLB fault -> prefill D-TLB
	or	%o5, %o0, %o5		! delay slot: tag = vaddr | context
	ba,a,pt	%xcc, __prefill_itlb	! otherwise prefill I-TLB
	/* Cheetah specific versions, patched at boot time.
	 *
	 * The writes of the PRIMARY_CONTEXT register in this file are
	 * safe even on Cheetah+ and later wrt. the page size fields.
	 * The nucleus page size fields do not matter because we make
	 * no data references, and these instructions execute out of a
	 * locked I-TLB entry sitting in the fully associative I-TLB.
	 * This sequence should also never trap.
	 */
__cheetah_flush_tlb_mm: /* 15 insns */
	/* Must remain exactly 15 instructions: cheetah_patch_cachetlbops
	 * copies that many words over __flush_tlb_mm at boot.
	 */
	rdpr	%pstate, %g7		! save PSTATE
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate	! disable interrupts
	wrpr	%g0, 1, %tl		! raise trap level (see comment above)
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3		! demap-context op, primary context
	ldxa	[%o2] ASI_DMMU, %g2	! save current primary context
	stxa	%o0, [%o2] ASI_DMMU	! install target context as primary
	stxa	%g0, [%g3] ASI_DMMU_DEMAP	! nuke D-TLB context
	stxa	%g0, [%g3] ASI_IMMU_DEMAP	! nuke I-TLB context
	stxa	%g2, [%o2] ASI_DMMU	! restore original primary context
	flush	%g6
	wrpr	%g0, 0, %tl		! back to trap level 0
	retl
	wrpr	%g7, 0x0, %pstate	! delay slot: restore PSTATE
__cheetah_flush_tlb_pending: /* 22 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Cheetah version of __flush_tlb_pending: uses the primary context
	 * register (Cheetah cannot do I-MMU demaps from the secondary
	 * context) and runs at %tl=1.  Must remain exactly 22 instructions
	 * for the boot-time patch.  Bit 0 of each vaddrs[] entry requests
	 * an additional I-TLB demap.
	 */
	rdpr	%pstate, %g7		! save PSTATE
	sllx	%o1, 3, %o1		! nr * 8 = byte offset past last entry
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate	! disable interrupts
	wrpr	%g0, 1, %tl		! raise trap level
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2	! save current primary context
	stxa	%o0, [%o4] ASI_DMMU	! install target context as primary
1:	sub	%o1, (1 << 3), %o1	! step back to previous entry
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0		! bit 0: also flush I-TLB?
	be,pn	%icc, 2f		! D-TLB only
	andn	%o3, 1, %o3		! delay slot: strip flag bit -> vaddr
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b			! more entries remaining?
	nop
	stxa	%g2, [%o4] ASI_DMMU	! restore original primary context
	flush	%g6
	wrpr	%g0, 0, %tl		! back to trap level 0
	retl
	wrpr	%g7, 0x0, %pstate	! delay slot: restore PSTATE
#ifdef DCACHE_ALIASING_POSSIBLE
flush_dcpage_cheetah: /* 11 insns */
	/* %o0 = kaddr.  Invalidate one page from the D-cache via
	 * ASI_DCACHE_INVALIDATE, 32 bytes per iteration.  Must remain
	 * exactly 11 instructions (patched over __flush_dcache_page).
	 */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0		! kernel virtual -> physical
	sethi	%hi(PAGE_SIZE), %o4	! byte offset, counts down
1:	subcc	%o4, (1 << 5), %o4	! next 32-byte line
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
cheetah_patch_one:
	/* Copy %o2 instruction words from %o1 (source routine) to %o0
	 * (destination routine), flushing the I-cache for each patched
	 * word so the new code is fetched coherently.
	 */
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0			! keep I-cache coherent with the store
	subcc	%o2, 1, %o2		! one word patched
	add	%o1, 4, %o1		! next source word
	bne,pt	%icc, 1b
	add	%o0, 4, %o0		! delay slot: next destination word
	retl
	nop
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Called once at boot on Cheetah: overwrite the Spitfire flush
	 * routines with their Cheetah variants.  The word counts passed
	 * in %o2 must match the "N insns" annotations on the Cheetah
	 * routines above.
	 */
	save	%sp, -128, %sp		! need a window for the calls
	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	cheetah_patch_one
	mov	15, %o2			! delay slot: 15 instruction words
	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	cheetah_patch_one
	mov	22, %o2			! delay slot: 22 instruction words
#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(flush_dcpage_cheetah), %o1
	or	%o1, %lo(flush_dcpage_cheetah), %o1
	call	cheetah_patch_one
	mov	11, %o2			! delay slot: 11 instruction words
#endif /* DCACHE_ALIASING_POSSIBLE */
	ret
	restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *	%g5	mm->context	(all tlb flushes)
	 *	%g1	address arg 1	(tlb page and range flushes)
	 *	%g7	address arg 2	(tlb range flush only)
	 *
	 *	%g6	ivector table, don't touch
	 *	%g2	scratch 1
	 *	%g3	scratch 2
	 *	%g4	scratch 3
	 *
	 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
	 */
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:
	/* Cross-call slave: flush all TLB entries for context %g5 using
	 * the primary context register, then resume via retry.
	 */
	mov	PRIMARY_CONTEXT, %g2
	mov	0x40, %g4		! demap-context op, primary context
	ldxa	[%g2] ASI_DMMU, %g3	! save current primary context
	stxa	%g5, [%g2] ASI_DMMU	! install target context
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU	! restore original primary context
	retry
	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	/* Cross-call slave version of the pending-vaddr flush; same entry
	 * encoding (bit 0 = also flush I-TLB) but uses the primary context.
	 * Note %g5 is reused as scratch once the context is installed.
	 */
	sllx	%g1, 3, %g1		! nr * 8 = byte offset past last entry
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2	! save current primary context
	stxa	%g5, [%g4] ASI_DMMU	! install target context
1:	sub	%g1, (1 << 3), %g1	! step back to previous entry
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0		! bit 0: also flush I-TLB?
	be,pn	%icc, 2f		! D-TLB only
	andn	%g5, 0x1, %g5		! delay slot: strip flag bit -> vaddr
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b			! more entries remaining?
	nop
	stxa	%g2, [%g4] ASI_DMMU	! restore original primary context
	retry
	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
	/* Cross-call slave: demap kernel (nucleus) range [%g1, %g7),
	 * page by page from the top down.
	 */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2	! %g2 = PAGE_SIZE - 1
	andn	%g1, %g2, %g1		! page-align start
	andn	%g7, %g2, %g7		! page-align end
	sub	%g7, %g1, %g3		! range length
	add	%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub	%g3, %g2, %g3		! offset of the final page
	or	%g1, 0x20, %g1 ! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP	! demap-page, D-TLB
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP	! demap-page, I-TLB
	membar	#Sync
	brnz,pt	%g3, 1b			! loop until offset hits zero
	sub	%g3, %g2, %g3		! delay slot: previous page
	retry
	nop
	nop
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:
	/* Cross-call slave: build a full trap frame via etrap_irq, call
	 * smp_synchronize_tick_client, then return through rtrap_xcall.
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle out of the
							!   interrupt globals
	rdpr	%pil, %g2		! save current PIL for etrap
	wrpr	%g0, 15, %pil		! block all maskable interrupts
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq		! enter the trap frame builder...
109:	 or	%g7, %lo(109b), %g7	! delay slot: %g7 = return point
	call	smp_synchronize_tick_client
	nop
	clr	%l6
	b	rtrap_xcall		! return from trap (non-v9 branch)
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1	! delay slot
	/* NOTE: This is SPECIAL!!  We do etrap/rtrap however
	 * we choose to deal with the "BH's run with
	 * %pil==15" problem (described in asm/pil.h)
	 * by just invoking rtrap directly past where
	 * BH's are checked for.
	 *
	 * We do it like this because we do not want %pil==15
	 * lockups to prevent regs being reported.
	 */
	.globl	xcall_report_regs
xcall_report_regs:
	/* Cross-call slave: dump this CPU's registers via __show_regs,
	 * using a full etrap frame so the saved state is coherent.
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle out of the
							!   interrupt globals
	rdpr	%pil, %g2		! save current PIL for etrap
	wrpr	%g0, 15, %pil		! block all maskable interrupts
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq		! enter the trap frame builder...
109:	 or	%g7, %lo(109b), %g7	! delay slot: %g7 = return point
	call	__show_regs
	 add	%sp, PTREGS_OFF, %o0	! delay slot: arg = saved pt_regs
	clr	%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1	! delay slot
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call slave: invalidate one page from the D-cache via
	 * ASI_DCACHE_INVALIDATE, 32 bytes per iteration.
	 */
	sethi	%hi(PAGE_SIZE), %g3	! byte offset, counts down
1:	subcc	%g3, (1 << 5), %g3	! next 32-byte line
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Cross-call slave: scan every D-cache tag, invalidating lines
	 * that are valid (low DTAG bits set) and match the page; then,
	 * if the page has a mapping (%g5 != 0), flush the corresponding
	 * I-cache lines as well via the flush instruction.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparitor
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0		! line valid?
	be,pn	%xcc, 2f		! no: skip it
	andn	%g2, 0x3, %g2		! delay slot: strip status bits
	cmp	%g2, %g1		! tag match for our page?
	bne,pt	%xcc, 2f		! no: skip it
	nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG	! invalidate this line
	membar	#Sync
2:	cmp	%g3, 0			! scanned all tags yet?
	bne,pt	%xcc, 1b
	sub	%g3, (1 << 5), %g3	! delay slot: previous tag offset
	brz,pn	%g5, 2f			! unmapped page: skip I-cache flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3	! flush count, counts down
1:	flush	%g7			! flush one I-cache line
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	add	%g7, (1 << 5), %g7	! delay slot: next line
2:	retry
	nop
	nop
	.globl	xcall_promstop
xcall_promstop:
	/* Cross-call slave: build a trap frame and halt this CPU in the
	 * PROM via prom_stopself.  Does not return.
	 */
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate	! toggle out of the
							!   interrupt globals
	rdpr	%pil, %g2		! save current PIL for etrap
	wrpr	%g0, 15, %pil		! block all maskable interrupts
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq		! enter the trap frame builder...
109:	 or	%g7, %lo(109b), %g7	! delay slot: %g7 = return point
	flushw				! flush register windows to the stack
	call	prom_stopself
	nop
	/* We should not return, just spin if we do... */
1:	b,a,pt	%xcc, 1b
	nop
	.data

	/* Dummy word used as a store target for the Spitfire Errata #32
	 * workaround (a store is issued before each TLB data access).
	 */
errata32_hwbug:
	.xword	0

	.text

	/* These two are not performance critical... */
	.globl	xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
	/* Walk every D-TLB and I-TLB entry by index, zapping each one
	 * that is not locked (_PAGE_L clear).  %g2 = entry index,
	 * %g3 = index << 3 (register offset for the data-access ASIs).
	 */

	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

	clr	%g2
	clr	%g3
1:	ldxa	[%g3] ASI_DTLB_DATA_ACCESS, %g4	! read D-TLB entry data
	and	%g4, _PAGE_L, %g5
	brnz,pn	%g5, 2f			! locked entry: leave it alone
	mov	TLB_TAG_ACCESS, %g7	! delay slot
	stxa	%g0, [%g7] ASI_DMMU	! clear the tag...
	membar	#Sync
	stxa	%g0, [%g3] ASI_DTLB_DATA_ACCESS	! ...and the data
	membar	#Sync

	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

2:	ldxa	[%g3] ASI_ITLB_DATA_ACCESS, %g4	! read I-TLB entry data
	and	%g4, _PAGE_L, %g5
	brnz,pn	%g5, 2f			! locked entry: leave it alone
	mov	TLB_TAG_ACCESS, %g7	! delay slot
	stxa	%g0, [%g7] ASI_IMMU	! clear the tag...
	membar	#Sync
	stxa	%g0, [%g3] ASI_ITLB_DATA_ACCESS	! ...and the data
	membar	#Sync

	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

2:	add	%g2, 1, %g2		! next TLB entry index
	cmp	%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
	ble,pt	%icc, 1b
	sll	%g2, 3, %g3		! delay slot: index -> register offset
	flush	%g6
	retry
	.globl	xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
	/* Cheetah supports a demap-all operation (0x80): one store per
	 * MMU flushes the entire TLB.
	 */
	mov	0x80, %g2		! demap-all op
	stxa	%g0, [%g2] ASI_DMMU_DEMAP
	stxa	%g0, [%g2] ASI_IMMU_DEMAP
	retry
	/* These just get rescheduled to PIL vectors: each xcall slave
	 * simply raises the corresponding soft interrupt and resumes;
	 * the real work runs later at that PIL.
	 */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry
#endif /* CONFIG_SMP */