bfa_hw_cb.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <bfa_priv.h>
#include <bfi/bfi_cbreg.h>
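
/**
 * Set up the host register addresses for this PCI function: the interrupt
 * status/mask registers for host function 0 or 1, and the producer index,
 * consumer index and depth registers for each CPE (request) and RME
 * (response) queue, all as offsets from the BAR0 mapping.
 */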
void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
	struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
	bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
	int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);

	if (fn == 0) {
		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
		bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
	} else {
		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
		bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
	}

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		/*
		 * CPE registers
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
		bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
		bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));

		/*
		 * RME registers
		 */
		q = RME_Q_NUM(fn, i);
		bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
		bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
		bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
	}
}
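
/**
 * Crossbow -- dummy request queue ack for non-MSIX operation; the MSIX
 * variant below does the actual acknowledgement.
 */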
void
bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
{
}
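
/**
 * Acknowledge a request (CPE) queue interrupt by writing the queue's bit
 * for this PCI function to the host interrupt status register.
 */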
static void
bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
		__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
}
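
/**
 * Crossbow -- dummy response queue ack for non-MSIX operation.
 */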
void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}
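
/**
 * Acknowledge a response (RME) queue interrupt by writing the queue's bit
 * for this PCI function to the host interrupt status register.
 */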
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
		__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
}
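
/**
 * Return the MSIX vector bitmap for this PCI function: four CPE and four
 * RME queue bits plus the function's LPU mailbox bit, together with the
 * error bits shared by both functions -- __HFN_NUMINTS (13) vectors in all.
 */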
void
bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
		      u32 *num_vecs, u32 *max_vec_bit)
{
#define __HFN_NUMINTS	13
	if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				   __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				   __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				   __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				   __HFN_INT_MBOX_LPU0);
		*max_vec_bit = __HFN_INT_MBOX_LPU0;
	} else {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				   __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				   __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				   __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				   __HFN_INT_MBOX_LPU1);
		*max_vec_bit = __HFN_INT_MBOX_LPU1;
	}

	*msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
			    __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
	*num_vecs = __HFN_NUMINTS;
}

/**
 * No special setup required for crossbow -- vector assignments are implicit.
 */
void
bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
{
	int i;

	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));

	bfa->msix.nvecs = nvecs;
	if (nvecs == 1) {
		for (i = 0; i < BFA_MSIX_CB_MAX; i++)
			bfa->msix.handler[i] = bfa_msix_all;
		return;
	}

	for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_reqq;

	for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_rspq;

	for (; i < BFA_MSIX_CB_MAX; i++)
		bfa->msix.handler[i] = bfa_msix_lpu_err;
}

/**
 * Crossbow -- dummy, interrupts are masked
 */
void
bfa_hwcb_msix_install(struct bfa_s *bfa)
{
}

void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}

/**
 * No special enable/disable -- vector assignments are implicit.
 */
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
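
/**
 * Return the range of MSIX vectors used for RME (response) queues.
 */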
void
bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
{
	*start = BFA_MSIX_RME_Q0;
	*end = BFA_MSIX_RME_Q7;
}