bfa_intr.c 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270
  1. /*
  2. * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include <bfa.h>
  18. #include <bfi/bfi_ctreg.h>
  19. #include <bfa_port_priv.h>
  20. #include <bfa_intr_priv.h>
  21. #include <cs/bfa_debug.h>
  22. BFA_TRC_FILE(HAL, INTR);
/**
 * Handler for error interrupts: delegates to the IOC layer's
 * error ISR. The raw interrupt status 'intr' is unused here.
 */
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
	bfa_ioc_error_isr(&bfa->ioc);
}
/**
 * Handler for LPU mailbox interrupts: delegates to the IOC
 * mailbox ISR.
 */
static void
bfa_msix_lpu(struct bfa_s *bfa)
{
	bfa_ioc_mbox_isr(&bfa->ioc);
}
  33. static void
  34. bfa_reqq_resume(struct bfa_s *bfa, int qid)
  35. {
  36. struct list_head *waitq, *qe, *qen;
  37. struct bfa_reqq_wait_s *wqe;
  38. waitq = bfa_reqq(bfa, qid);
  39. list_for_each_safe(qe, qen, waitq) {
  40. /**
  41. * Callback only as long as there is room in request queue
  42. */
  43. if (bfa_reqq_full(bfa, qid))
  44. break;
  45. list_del(qe);
  46. wqe = (struct bfa_reqq_wait_s *) qe;
  47. wqe->qresume(wqe->cbarg);
  48. }
  49. }
/**
 * Catch-all MSI-X vector handler: ignores 'vec' and runs the full
 * INTx-style status poll (bfa_intx) to service everything pending.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
/**
 * hal_intr_api
 */

/**
 * INTx-mode interrupt handler / common poll routine.
 *
 * Reads the interrupt status register and services, in order:
 * RME (response) queue bits, CPE (request) queue bits, and finally
 * any remaining mailbox/error bits via bfa_msix_lpu_err().
 *
 * Returns BFA_TRUE if any interrupt was pending, BFA_FALSE otherwise.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/**
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	/* Ack the serviced RME bits in the status register before
	 * dispatching (presumably write-1-to-clear — per bfi_ctreg.h). */
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/**
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* Anything left is a mailbox or error condition. */
	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
/**
 * Enable interrupt delivery for this PCI function.
 *
 * Installs the MSI-X vector handlers, builds the set of sources to
 * unmask (error sources plus this function's CPE/RME queue and
 * mailbox bits), clears stale status, programs the mask register,
 * and selects INTx vs. MSI-X ISR mode.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);

	/* Error sources are unmasked regardless of PCI function. */
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	/* Queue/mailbox ownership is split by PCI function:
	 * function 0 gets CPE/RME queues 0-3 and the LPU0 mailbox,
	 * any other function gets queues 4-7 and the LPU1 mailbox. */
	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	/* Write the enabled set to the status register (clears stale
	 * events), then program the mask register with its complement. */
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
	bfa->iocfc.intr_mask = ~intr_unmask;	/* cached copy of the mask */

	/* Use MSI-X ISR mode only if vectors were actually allocated. */
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
/**
 * Disable all interrupt delivery: revert to INTx ISR mode, mask
 * every interrupt source, then remove the MSI-X vector handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
	bfa_msix_uninstall(bfa);
}
  127. void
  128. bfa_msix_reqq(struct bfa_s *bfa, int qid)
  129. {
  130. struct list_head *waitq;
  131. qid &= (BFI_IOC_MAX_CQS - 1);
  132. bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
  133. /**
  134. * Resume any pending requests in the corresponding reqq.
  135. */
  136. waitq = bfa_reqq(bfa, qid);
  137. if (!list_empty(waitq))
  138. bfa_reqq_resume(bfa, qid);
  139. }
/**
 * Handler for messages whose class has no registered ISR: trace the
 * offending message header, assert, and freeze the trace buffer so
 * the evidence is preserved.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}
/**
 * MSI-X handler for an RME (response) queue vector.
 *
 * Acks the queue interrupt, drains all pending response messages by
 * dispatching each to its per-message-class ISR, publishes the new
 * consumer index to hardware, and finally resumes any requesters
 * waiting for request-queue space on the matching reqq.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_trc_fp(bfa, qid);

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);
	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	/* Messages are dispatched only while rme_process is set; when it
	 * is clear, entries are discarded and only the CI is advanced. */
	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI (shadow copy, then the hardware register; mmiowb
	 * orders the MMIO write before any subsequent unlock)
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
	bfa_os_mmiowb();

	/**
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
/**
 * MSI-X handler for the mailbox/error vector.
 *
 * Services LPU mailbox interrupts first, then handles any error bits
 * (EMC/LPU0/LPU1/PSS/LL_HALT): clears the related side registers
 * where required, acks the error bits in the status register, and
 * invokes the error ISR.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_msix_lpu(bfa);

	/* Keep only the error-condition bits for the logic below. */
	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/**
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/**
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = bfa_reg_read(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
					curr_value);
		}

		/* Ack the error bits, then run the error handler. */
		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
		bfa_msix_errint(bfa, intr);
	}
}
  219. void
  220. bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
  221. {
  222. bfa_isrs[mc] = isr_func;
  223. }