/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>

	.text
	.align	32
sun4v_cpu_mondo:
	/* CPU mondo (cross-call) interrupt vector.
	 * Runs at trap level: only the alternate globals %g1-%g7 are
	 * usable as scratch, there is no stack.
	 *
	 * Head offset in %g2, tail offset in %g4.
	 * If they are the same, the queue is empty and there is no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.
	 * The scratchpad register points at this cpu's fault_info area;
	 * back up by its offset to reach the start of the trap_block entry.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5		/* %g5 = high half: MMU context arg */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3		/* zero-extend low 32 bits: handler PC */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	/* Advance %g2 to the next 64-byte queue entry; the two 0x8
	 * bumps performed above are subtracted back out here.
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer, wrapping at the queue size.
	 * NOTE(review): mask assumes an 8192-byte mondo queue -- confirm
	 * against the queue registration/allocation code.
	 */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Jump to the cross-call handler; no return address is saved
	 * (%g0 link register), the handler exits the trap itself.
	 */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
sun4v_dev_mondo:
	/* Device mondo interrupt vector: dequeue one device interrupt
	 * report, chain its ivector_table bucket onto this cpu's
	 * __irq_work[] list, and raise the matching softint.
	 * Runs at trap level; only %g1-%g7 are available.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3, then step past this 64-byte entry. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.
	 * NOTE(review): mask assumes an 8192-byte mondo queue -- confirm. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1.
	 * NOTE(review): %g1 is scaled (<< 6, 64-byte entries) as if it
	 * already held the cpu number, but nothing visible above loads
	 * it -- verify a preceding cpuid load was not dropped
	 * (cf. the TRAP_LOAD_IRQ_WORK macro in <asm/cpudata.h>).
	 */
	sethi	%hi(__irq_work), %g4
	sllx	%g1, 6, %g1
	or	%g4, %lo(__irq_work), %g4
	add	%g4, %g1, %g1

	/* Get &ivector_table[IVEC] into %g4 (32-byte entries). */
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 5, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

	/* Load IRQ %pil into %g5.
	 * NOTE(review): offset 0x04 is presumably the bucket's pil
	 * field -- confirm against struct ino_bucket. */
	ldub	[%g4 + 0x04], %g5

	/* Insert ivector_table[] entry into __irq_work[] queue,
	 * indexed by pil (4-byte slots): push the bucket on the
	 * front of the per-pil singly linked list. */
	sllx	%g5, 2, %g3
	lduw	[%g1 + %g3], %g2	/* g2 = irq_work(cpu, pil) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
	stw	%g4, [%g1 + %g3]	/* irq_work(cpu, pil) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint. */
	mov	1, %g2
	sllx	%g2, %g5, %g2
	wr	%g2, 0x0, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
sun4v_res_mondo:
	/* Resumable error mondo vector: copy the 64-byte error report
	 * from the hypervisor queue into a per-cpu kernel buffer, then
	 * enter the kernel (etrap) and log it from C.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero,
	 * that slot has not been consumed yet: the buffer is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1 (passed to C below). */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, 8 bytes at a
	 * time; %g2 ends up advanced by 0x40, i.e. at the next entry. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping at the queue size.
	 * NOTE(review): mask assumes an 8192-byte queue -- confirm. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done (so %g4 carries the entry offset from %g1).
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* delay slot: etrap return address */

	/* Log the event: sun4v_resum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail
	 * offset), dropping the pending entries. We'll just trap
	 * again otherwise. Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil

	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* delay slot: etrap return address */

	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
sun4v_nonres_mondo:
	/* Non-resumable error mondo vector: identical in structure to
	 * sun4v_res_mondo, but for the NONRESUM queue and handlers.
	 *
	 * Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.
	 * (Original comment said "RES" -- copy/paste artifact.) */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero,
	 * that slot has not been consumed yet: the buffer is full. */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1 (passed to C below). */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer, 8 bytes at a
	 * time; %g2 ends up advanced by 0x40, i.e. at the next entry. */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping at the queue size.
	 * NOTE(review): mask assumes an 8192-byte queue -- confirm. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code. The etrap handling will leave %g4 in %l4 for us
	 * when it's done (so %g4 carries the entry offset from %g1).
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* delay slot: etrap return address */

	/* Log the event: sun4v_nonresum_error(regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail
	 * offset), dropping the pending entries. We'll just trap
	 * again otherwise. Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil

	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7		/* delay slot: etrap return address */

	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop