/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */
#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>

	.text
	.align	32

	/* CPU mondo interrupt vector: dequeue one pending cross-call
	 * (cpu mondo) from this cpu's queue and jump to its handler.
	 * Runs entirely on the trap globals (%g1-%g7); never touches
	 * the register windows or the stack.
	 */
sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, the queue is empty and there is no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  The scratchpad
	 * holds a pointer into this cpu's trap_block at offset
	 * TRAP_PER_CPU_FAULT_INFO, so subtract that to recover the base.
	 */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get CPU mondo queue base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5			/* %g5 = high half (context arg) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3			/* zero-extend low 32 bits (handler PC) */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7	/* overwrites queue base, no longer needed */

	/* Advance to the next 0x40-byte queue entry; 0x10 of the stride
	 * was already consumed by the two adds above.
	 */
	add	%g2, 0x40 - 0x8 - 0x8, %g2

	/* Update queue head pointer, wrapping at the 8K queue size. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Tail-jump to the handler; it is responsible for returning
	 * from the trap (args are in %g5, %g1, %g7 as laid out above).
	 */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry
	/* Device mondo interrupt vector: pull one device interrupt
	 * report (IVEC) off this cpu's DEV mondo queue, chain its
	 * ivector_table bucket onto the per-cpu __irq_work list, and
	 * raise the PIL_DEVICE_IRQ softint so the real processing
	 * happens at the proper interrupt level.
	 */
sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get DEV mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3 (first word of the 0x40-byte entry). */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer (wrap at 8K); this frees up some
	 * registers.
	 */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Get &__irq_work[smp_processor_id()] into %g1. */
	TRAP_LOAD_IRQ_WORK(%g1, %g4)

	/* Get &ivector_table[IVEC] into %g4.  Entry stride is 8 bytes
	 * (sllx by 3); the bucket layout itself is defined elsewhere —
	 * the chain link appears to live at offset 0x00 (see below).
	 */
	sethi	%hi(ivector_table), %g4
	sllx	%g3, 3, %g3
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

	/* Insert ivector_table[] entry into the __irq_work[] queue
	 * (a singly-linked list threaded through 32-bit pointers).
	 */
	lduw	[%g1], %g2			/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]		/* bucket->irq_chain = g2 */
	stw	%g4, [%g1]			/* irq_work(cpu) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint,
	 * then retry; both the work and empty paths end in retry.
	 */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

sun4v_dev_mondo_queue_empty:
	retry
	/* Resumable-error mondo vector: copy one 64-byte error report
	 * from the hypervisor queue into the per-cpu kernel buffer,
	 * then enter the kernel (etrap) to log it from C code.
	 */
sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero,
	 * that slot was never consumed: the buffer is full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1 (passed to C code). */
	mov	%g2, %g1

	/* Copy the 64-byte queue entry into the kernel buffer,
	 * 8 bytes at a time; %g2 advances by 0x40 total, which also
	 * serves as the new head offset below.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping at the 8K queue size. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  NOTE(review): etrap_irq apparently expects
	 * the saved %pil in %g2 and the return PC in %g7 — contract
	 * defined in etrap, not visible here.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4			/* %g4 = entry offset, lands in %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_resum_error(pt_regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail).
	 * We'll just trap again otherwise.  Call C code to log the
	 * overflow event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop
	/* Non-resumable-error mondo vector: identical in structure to
	 * the resumable handler above, but driven off the NONRESUM
	 * queue/buffer and logged via the nonresum C routines.
	 */
sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4; equal means empty. */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3. */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRESUM mondo queue base phys address into %g5. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRESUM kernel buffer base phys address into %g7. */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word of the kernel-buffer slot is non-zero,
	 * the buffer is full.
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	/* Remember this entry's offset in %g1 (passed to C code). */
	mov	%g2, %g1

	/* Copy the 64-byte queue entry into the kernel buffer,
	 * 8 bytes at a time; %g2 advances by 0x40 total.
	 */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer, wrapping at the 8K queue size. */
	sethi	%hi(8192 - 1), %g4
	or	%g4, %lo(8192 - 1), %g4
	and	%g2, %g4, %g2
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.  NOTE(review): etrap_irq apparently expects
	 * the saved %pil in %g2 and the return PC in %g7 — contract
	 * defined in etrap, not visible here.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	mov	%g1, %g4			/* %g4 = entry offset, lands in %l4 */
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event: sun4v_nonresum_error(pt_regs, entry_offset). */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap. */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail (%g4 still holds the tail).
	 * We'll just trap again otherwise.  Call C code to log the
	 * overflow event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop