/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */
#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

/* sun4v_cpu_mondo: drain one entry from this cpu's CPU mondo
 * (cross-call) queue and tail-jump to the handler PC it carries.
 * Runs at trap level using only the global registers %g1-%g7.
 */
sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  The scratchpad
	 * register holds the per-cpu fault_info pointer, so backing up
	 * by TRAP_PER_CPU_FAULT_INFO yields the trap_block base.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5		/* %g5 = high half (context arg) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3		/* zero-extend low 32 bits = handler PC */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7

	/* Queue entries are 0x40 bytes and we already advanced the
	 * offset by 0x10 above, so step to the start of the next entry.
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer, wrapping it with the queue size mask. */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Tail-jump to the handler PC; no return address is saved (%g0). */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
/* sun4v_dev_mondo: drain one entry from this cpu's device mondo
 * queue, push the corresponding ivector_table bucket onto the
 * per-cpu IRQ work list, and raise the device softint so the
 * handler runs at PIL_DEVICE_IRQ.
 */
sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3, then advance past this 0x40-byte entry. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Current we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers. */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2
	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* %g1 = physical address of this cpu's IRQ work list head. */
	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* Get __pa(&ivector_table[IVEC]) into %g4; entries are 16 bytes. */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket onto the work list: store the old list head
	 * into the bucket's first word (via the physical ASI), then
	 * make this bucket the new list head.
	 */
	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint,
	 * then fall through to the shared retry below.
	 */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
/* sun4v_res_mondo: resumable-error mondo handler.  Copy the 64-byte
 * queue entry into the per-cpu kernel buffer, advance the queue head,
 * then etrap into C (sun4v_resum_error) to log it.  If the kernel
 * buffer slot for this entry is still occupied, report overflow via
 * sun4v_resum_overflow instead.
 */
sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel buffer entry is non-zero,
	 * that slot has not been consumed yet: treat the queue as full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1; it is passed down to the
	 * C code (through %g4 -> %l4 across etrap) so it can locate the
	 * saved entry in the kernel buffer.
	 */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, 8 bytes at a time. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping it with the queue size mask. */
	and	%g2, %g4, %g2
	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  NOTE(review): %g2 appears to carry the
	 * previous PIL into etrap_irq -- confirm against etrap.S.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif

	/* Log the event: sun4v_resum_error(pt_regs, entry offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
/* sun4v_nonres_mondo: non-resumable-error mondo handler.  Structure
 * mirrors sun4v_res_mondo: copy the 64-byte queue entry into the
 * per-cpu kernel buffer, advance the head, then etrap into C
 * (sun4v_nonresum_error) to log it; on a full kernel buffer slot,
 * report overflow via sun4v_nonresum_overflow.
 */
sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel buffer entry is non-zero,
	 * that slot has not been consumed yet: treat the queue as full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1; it is passed down to the
	 * C code (through %g4 -> %l4 across etrap) so it can locate the
	 * saved entry in the kernel buffer.
	 */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, 8 bytes at a time. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping it with the queue size mask. */
	and	%g2, %g4, %g2
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  NOTE(review): %g2 appears to carry the
	 * previous PIL into etrap_irq -- confirm against etrap.S.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif

	/* Log the event: sun4v_nonresum_error(pt_regs, entry offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop