/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context. Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */
	/* This file is meant to be read efficiently by the CPU, not by humans.
	 * Try hard not to fuck this up for anyone...
	 */
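	/* A note on the magic demap addresses used throughout this file,
	 * per the UltraSPARC MMU demap-operation address format: bit 6
	 * selects "demap context" vs. "demap page", and bits 5:4 select
	 * which context register applies (00 primary, 01 secondary,
	 * 10 nucleus). So 0x40 demaps the whole primary context, 0x50
	 * the whole secondary context, "vaddr | 0x10" one secondary-
	 * context page, and "vaddr | 0x20" one nucleus page.
	 */
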
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	 mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
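
	/* Flush a batch of pending page translations. Each vaddrs[]
	 * entry is a page-aligned virtual address; bit 0 set means the
	 * mapping was executable, so the I-TLB entry must be demapped
	 * as well. PSTATE_IE is cleared up front so the temporary
	 * SECONDARY_CONTEXT switch below cannot be interrupted.
	 */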
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	 or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	 wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
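
	/* Demap [start, end) from the nucleus (kernel) context one page
	 * at a time. %o3 starts at (end - start - PAGE_SIZE) and counts
	 * down to zero, so the page at end-PAGE_SIZE goes first and the
	 * page at start goes last.
	 */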
	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	 sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	 nop
	nop
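
	/* Slow path for __flush_tlb_mm: the context register did not
	 * already hold the context we want flushed, so briefly install
	 * it in SECONDARY_CONTEXT, demap, and restore the old value.
	 * Note that wrpr with an immediate XORs it into %pstate, which
	 * is what toggles PSTATE_IE off here and back on at the retl.
	 */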
__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	 wrpr	%g1, 0, %pstate
/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	membar	#StoreStore
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	 flush	%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif
#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
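	/* The loop below walks the 16K direct-mapped D-cache (32-byte
	 * lines) from the top down, invalidating every line whose
	 * physical tag matches the page. The srlx by 11 puts the page's
	 * physical address into the same form ASI_DCACHE_TAG returns,
	 * so once the valid bits are masked off a bare cmp suffices.
	 */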
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	 andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	 nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	 sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	 sllx	%o0, 11, %o0
	retl
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.previous

	/* Cheetah specific versions, patched at boot time. */
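	/* Cheetah cannot demap the I-MMU from the secondary context (the
	 * "madness" noted at the top of this file), so these variants
	 * briefly install the context in PRIMARY_CONTEXT instead, at
	 * TL=1 and with interrupts off, taking care to preserve the
	 * nucleus page-size fields of the context register.
	 */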
__cheetah_flush_tlb_mm:	/* 19 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	 andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
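	/* sun4v calling conventions, for reference: a fast trap takes
	 * its function number in %o5 and arguments in %o0-%o3, and
	 * returns a status in %o0 (return values, if any, come back in
	 * %o1-%o4). HV_MMU_UNMAP_ADDR_TRAP is a dedicated trap number
	 * taking its arguments directly, so no function number is
	 * loaded for it.
	 */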
__hypervisor_tlb_tl0_error:
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	 mov	%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm:	/* 10 insns */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	 mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop

__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g1, 1b
	 nop
	retl
	 nop

__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sethi	%hi(PAGE_SIZE), %g3
	mov	%o0, %g1
	sub	%o1, %g1, %g2
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	 mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g2, 1b
	 sub	%g2, %g3, %g2
2:	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif
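
	/* Copy %o2 instruction words from %o1 to %o0, flushing each
	 * patched word out of the I-cache as we go. The "nn insns"
	 * annotations on the routines above exist so that the counts
	 * passed to this routine stay in sync with the code being
	 * copied.
	 */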
tlb_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	 add	%o0, 4, %o0
	retl
	 nop

	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	19, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	scratch 1
	 *   %g2	scratch 2
	 *   %g3	scratch 3
	 *   %g4	scratch 4
	 */
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	sllx	%g1, 3, %g1
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0
	be,pn	%icc, 2f
	 andn	%g5, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b
	 nop
	stxa	%g2, [%g4] ASI_DMMU
	retry
	nop

	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	 sub	%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:

661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	smp_synchronize_tick_client
	 nop
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	.globl	xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi	%hi(global_reg_snapshot), %g1
	or	%g1, %lo(global_reg_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rdpr	%tstate, %g7
	stx	%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr	%tpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TPC]
	rdpr	%tnpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TNPC]
	stx	%o7, [%g1 + GR_SNAP_O7]
	stx	%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
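	/* The trick: rotate %cwp back one window (what a restore would
	 * do) so that the previous window's %i7 -- one more level of
	 * return address -- becomes readable, grab it, then put %cwp
	 * back. Only safe because we run at TL>0 with interrupts
	 * disabled.
	 */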
	rdpr	%cwp, %g3	! use %g3: %g2 still holds the cpuid,
	sub	%g3, 1, %g7	! which indexes trap_block below
	wrpr	%g7, %cwp
	mov	%i7, %g7
	wrpr	%g3, %cwp
	stx	%g7, [%g1 + GR_SNAP_RPC]
	sethi	%hi(trap_block), %g7
	or	%g7, %lo(trap_block), %g7
	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add	%g7, %g2, %g7
	ldx	[%g7 + TRAP_PER_CPU_THREAD], %g3
	membar	#StoreStore
	stx	%g3, [%g1 + GR_SNAP_THREAD]
	retry

#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	 andn	%g2, 0x3, %g2
	cmp	%g2, %g1
	bne,pt	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	 sub	%g3, (1 << 5), %g3
	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi	%hi(PAGE_SIZE), %g3
1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	 add	%g7, (1 << 5), %g7
2:	retry
	nop
	nop

	/* %g5: error
	 * %g6: tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap
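
	/* The hypervisor xcall variants must make HV calls, which use
	 * the %o registers -- but as cross-call slaves we arrived here
	 * on top of whatever the interrupted code had in them. So each
	 * routine stashes the live %o registers in %g scratch, makes
	 * the call(s), and restores them before the retry.
	 */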
	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn	%o0, __hypervisor_tlb_xcall_error
	 mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry

	.globl	__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
	sllx	%g1, 3, %g1
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov	%g5, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, __hypervisor_tlb_xcall_error
	 mov	%o0, %g5
	brnz,pt	%g1, 1b
	 nop
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry

	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	mov	%o2, %g7
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn	%o0, __hypervisor_tlb_xcall_error
	 mov	%o0, %g5
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	 sub	%g3, %o2, %g3
	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
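	/* Writing a bit into %set_softint latches a software interrupt
	 * at that PIL; the real work then runs from the corresponding
	 * PIL handler once we retry out of the cross call.
	 */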
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl	xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr	%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	ba,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	smp_kgdb_capture_client
	 add	%sp, PTREGS_OFF, %o0
	/* Has to be a non-v9 branch due to the large distance. */
	ba	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
#endif

#endif /* CONFIG_SMP */

	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	10, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	16, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	 mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	 mov	21, %o2

	sethi	%hi(xcall_flush_tlb_pending), %o0
	or	%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	call	tlb_patch_one
	 mov	21, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	 mov	25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore