/*
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/init.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"
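	/* These strings name the OBP client-interface service
	 * ("call-method") and the MMU node methods invoked below
	 * to lock translations into the I- and D-TLBs.
	 */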
#define TRAMP_STACK_SIZE	1024
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE

	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
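	/* The DCU value built above enables the D-MMU and I-MMU
	 * (DCU_DM, DCU_IM) and the D- and I-caches (DCU_DC, DCU_IC);
	 * the upper bits enable store merging, RAW bypass, prefetching,
	 * secondary load steering, and the W-cache, per the bit
	 * definitions in asm/dcu.h.
	 */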
	/* fallthru */
cheetah_generic_startup:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync
	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	/* fallthru */
niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi	%hi(0x80000000), %g5
	sllx	%g5, 32, %g5
	wr	%g5, %asr25

	ba,pt	%xcc, startup_continue
	 nop

spitfire_startup:
	mov	(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa	%g1, [%g0] ASI_LSU_CONTROL
	membar	#Sync

startup_continue:
	mov	%o0, %l0
	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

	sethi	%hi(0x80000000), %g2
	sllx	%g2, 32, %g2
	wr	%g2, 0, %tick_cmpr
	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 'num_kernel_image_mappings' consecutive entries.
	 */
	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x10], %l2
	add	%l2, -(192 + 128), %sp
	flushw
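	/* Offset 0x10 in p1275buf holds the saved PROM CIF stack
	 * pointer (assuming the layout in arch/sparc/prom/p1275.c);
	 * we carve one full stack frame (192 bytes) plus a 128-byte
	 * argument array off of it before calling into OBP.
	 */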
	/* Setup the loop variables:
	 * %l3: VADDR base
	 * %l4: TTE base
	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
	 * %l6: Number of TTE entries to map
	 * %l7: Highest TTE entry number, we count down
	 */
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6

	mov	15, %l7
	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
	mov	63, %l7
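	/* Cheetah's lockable TLB section has 16 entries (highest
	 * index 15), while Spitfire's fully-associative TLBs have 64
	 * (highest index 63).  Locked entries are placed at the top,
	 * counting down from the highest index.
	 */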
2:

3:
	/* Lock into I-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(itlb_load), %g2
	or	%g2, %lo(itlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0
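	/* The argument array built above follows the IEEE 1275 client
	 * interface convention: service name ("call-method"), number
	 * of inputs (5), number of results (1), then the inputs
	 * themselves: method name, MMU ihandle, virtual address, TTE
	 * data, and TLB entry index.  The "2047" undoes the V9 stack
	 * bias, and the array sits just above the 128-byte register
	 * window save area.
	 */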
	/* Lock into D-MMU */
	sethi	%hi(call_method), %g2
	or	%g2, %lo(call_method), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	sethi	%hi(dtlb_load), %g2
	or	%g2, %lo(dtlb_load), %g2
	stx	%g2, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(prom_mmu_ihandle_cache), %g2
	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx	%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset.  */
	sllx	%l5, 22, %g1

	add	%l3, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add	%l4, %g1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index.  */
	sub	%l7, %l5, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x38]

	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 3b
	 nop

	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]

	ba,pt	%xcc, after_lock_tlb
	 nop

niagara_lock_tlb:
	sethi	%hi(KERNBASE), %l3
	sethi	%hi(kern_locked_tte_data), %l4
	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
	clr	%l5
	sethi	%hi(num_kernel_image_mappings), %l6
	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
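	/* On sun4v the kernel image is locked via the hypervisor
	 * rather than OBP: mmu_map_perm_addr takes the fast-trap
	 * function number in %o5, with the virtual address, context
	 * (0 for the kernel), TTE data, and MMU flags in %o0-%o3
	 * (see asm/hypervisor.h).
	 */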
1:
	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_IMMU, %o3
	ta	HV_FAST_TRAP

	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx	%l5, 22, %g2
	add	%l3, %g2, %o0
	clr	%o1
	add	%l4, %g2, %o2
	mov	HV_MMU_DMMU, %o3
	ta	HV_FAST_TRAP

	add	%l5, 1, %l5
	cmp	%l5, %l6
	bne,pt	%xcc, 1b
	 nop

after_lock_tlb:
	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr	%g0, 0, %fprs

	wr	%g0, ASI_P, %asi

	mov	PRIMARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync
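	/* Each 661:/.sun4v_1insn_patch pair records the address of a
	 * single instruction together with its sun4v replacement;
	 * early boot code patches the instruction in place when
	 * running on the hypervisor, so one trampoline serves both
	 * sun4u (ASI_DMMU) and sun4v (ASI_MMU).
	 */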
	mov	SECONDARY_CONTEXT, %g7

661:	stxa	%g0, [%g7] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g0, [%g7] ASI_MMU
	.previous

	membar	#Sync

	/* Everything we do here, until we properly take over the
	 * trap table, must be done with extreme care.  We cannot
	 * make any references to %g6 (current thread pointer),
	 * %g4 (current task pointer), or %g5 (base of current cpu's
	 * per-cpu area) until we properly take over the trap table
	 * from the firmware and hypervisor.
	 *
	 * Get onto temporary stack which is in the locked kernel image.
	 */
	sethi	%hi(tramp_stack), %g1
	or	%g1, %lo(tramp_stack), %g1
	add	%g1, TRAMP_STACK_SIZE, %g1
	sub	%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	mov	0, %fp
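	/* STACK_BIAS (2047) is the fixed offset the 64-bit SPARC ABI
	 * applies to every stack pointer; the extra 256 bytes are
	 * presumably headroom for the OBP argument arrays built at
	 * %sp + 2047 + 128 below.
	 */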
	/* Put garbage in these registers to trap any access to them.  */
	set	0xdeadbeef, %g4
	set	0xdeadbeef, %g5
	set	0xdeadbeef, %g6

	call	init_irqwork_curcpu
	 nop

	sethi	%hi(tlb_type), %g3
	lduw	[%g3 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 1f
	 nop

	call	hard_smp_processor_id
	 nop

	call	sun4v_register_mondo_queues
	 nop
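	/* tlb_type 3 is "hypervisor" (see the enum in asm/spitfire.h);
	 * only sun4v cpus must register their per-cpu interrupt mondo
	 * queues with the hypervisor before taking over the trap table.
	 */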
1:	call	init_cur_cpu_trap
	 ldx	[%l0], %o0

	/* Start using proper page size encodings in ctx register.  */
	sethi	%hi(sparc64_kern_pri_context), %g3
	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section .sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	membar	#Sync

	wrpr	%g0, 0, %wstate

	sethi	%hi(prom_entry_lock), %g2
1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
	brnz,pn	%g1, 1b
	 nop

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi	%hi(init_thread_union), %g6
	or	%g6, %lo(init_thread_union), %g6

	sethi	%hi(is_sun4v), %o0
	lduw	[%o0 + %lo(is_sun4v)], %o0
	brz,pt	%o0, 2f
	 nop

	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa	%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi	%hi(KERNBASE), %g3
	sub	%g2, %g3, %g2
	sethi	%hi(kern_base), %g3
	ldx	[%g3 + %lo(kern_base)], %g3
	add	%g2, %g3, %o1
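	/* The sun4v trap-table call below passes this real (physical)
	 * address of the per-cpu MMU fault status area as its second
	 * argument; the hypervisor requires a real address, not a
	 * kernel virtual one.
	 */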
	sethi	%hi(sparc64_ttable_tl0), %o0

	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	2, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	stx	%o1, [%sp + 2047 + 128 + 0x20]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

	ba,pt	%xcc, 3f
	 nop

2:	sethi	%hi(sparc64_ttable_tl0), %o0
	set	prom_set_trap_table_name, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x00]
	mov	1, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x08]
	mov	0, %g2
	stx	%g2, [%sp + 2047 + 128 + 0x10]
	stx	%o0, [%sp + 2047 + 128 + 0x18]
	sethi	%hi(p1275buf), %g2
	or	%g2, %lo(p1275buf), %g2
	ldx	[%g2 + 0x08], %o1
	call	%o1
	 add	%sp, (2047 + 128), %o0

3:	sethi	%hi(prom_entry_lock), %g2
	stb	%g0, [%g2 + %lo(prom_entry_lock)]

	ldx	[%l0], %g6
	ldx	[%g6 + TI_TASK], %g4

	mov	1, %g5
	sllx	%g5, THREAD_SHIFT, %g5
	sub	%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add	%g6, %g5, %sp
	mov	0, %fp
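	/* %l0 (saved from %o0 at startup_continue) appears to point at
	 * the thread_info the boot cpu handed us; since thread_info
	 * lives at the bottom of the (1 << THREAD_SHIFT)-byte stack
	 * area, the initial %sp is its top minus one stack frame and
	 * the ABI stack bias.
	 */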
	rdpr	%pstate, %o1
	or	%o1, PSTATE_IE, %o1
	wrpr	%o1, 0, %pstate

	call	smp_callin
	 nop

	call	cpu_panic
	 nop
1:	b,a,pt	%xcc, 1b

	.align		8
sparc64_cpu_startup_end: