/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_cbreg.h"

/*
 * Set up the crossbow (CB) per-PCI-function interrupt and queue
 * register addresses.
 */
void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
	struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
	int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);

	if (fn == 0) {
		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
		bfa_regs->intr_mask   = (kva + HOSTFN0_INT_MSK);
	} else {
		bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
		bfa_regs->intr_mask   = (kva + HOSTFN1_INT_MSK);
	}

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		/*
		 * CPE registers
		 */
		q = CPE_Q_NUM(fn, i);
		bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
		bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
		bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));

		/*
		 * RME registers (RME queue numbering mirrors the CPE layout)
		 */
		q = RME_Q_NUM(fn, i);
		bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
		bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
		bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
	}
}
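
/*
 * For reference (not from this file): with the usual bfi_cbreg.h
 * definitions, CPE_Q_NUM()/RME_Q_NUM() are assumed to expand to
 * ((fn) << 2) + (q), i.e. four queues per PCI function:
 *
 *	CPE_Q_NUM(0, 0..3) -> CPE queues 0..3	(PCI function 0)
 *	CPE_Q_NUM(1, 0..3) -> CPE queues 4..7	(PCI function 1)
 *
 * which matches the per-function CPE/RME interrupt bits picked in
 * bfa_hwcb_msix_getvecs() below.
 */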

/* Non-MSIX request queue ack -- nothing to do on crossbow */
void
bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
{
}

/* MSIX request queue ack: write the queue's CPE_Qn bit to intr_status */
static void
bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
{
	writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
	       bfa->iocfc.bfa_regs.intr_status);
}

/* Non-MSIX response queue ack -- nothing to do on crossbow */
void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}

/* MSIX response queue ack: write the queue's RME_Qn bit to intr_status */
static void
bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
{
	writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
	       bfa->iocfc.bfa_regs.intr_status);
}
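
/*
 * Example (illustrative, assuming the __HFN_INT_CPE_Q0..Q7 and
 * __HFN_INT_RME_Q0..Q7 bits are consecutive, which the shifts above
 * rely on): acking response queue 2 on PCI function 1 writes
 *
 *	__HFN_INT_RME_Q0 << RME_Q_NUM(1, 2)  ==  __HFN_INT_RME_Q0 << 6
 *	                                     ==  __HFN_INT_RME_Q6
 *
 * to that host function's interrupt status register.
 */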

void
bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
		      u32 *num_vecs, u32 *max_vec_bit)
{
#define __HFN_NUMINTS	13
	if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				   __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				   __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				   __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				   __HFN_INT_MBOX_LPU0);
		*max_vec_bit = __HFN_INT_MBOX_LPU0;
	} else {
		*msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				   __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				   __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				   __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				   __HFN_INT_MBOX_LPU1);
		*max_vec_bit = __HFN_INT_MBOX_LPU1;
	}

	*msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
			    __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
	*num_vecs = __HFN_NUMINTS;
}
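
/*
 * Vector count (for reference): each PCI function claims 4 CPE bits,
 * 4 RME bits and 1 mailbox bit, plus the 4 error bits OR'd in for both
 * functions -- 4 + 4 + 1 + 4 = 13, which is __HFN_NUMINTS.
 */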

/*
 * No special setup required for crossbow -- vector assignments are implicit.
 */
void
bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
{
	int i;

	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));

	bfa->msix.nvecs = nvecs;
	if (nvecs == 1) {
		for (i = 0; i < BFA_MSIX_CB_MAX; i++)
			bfa->msix.handler[i] = bfa_msix_all;
		return;
	}

	for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_reqq;

	for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
		bfa->msix.handler[i] = bfa_msix_rspq;

	for (; i < BFA_MSIX_CB_MAX; i++)
		bfa->msix.handler[i] = bfa_msix_lpu_err;
}
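
/*
 * Resulting handler table (illustrative; assumes the BFA_MSIX_* enum
 * orders the crossbow vectors as CPE queues, then RME queues, then the
 * LPU/error vectors, which is what the loop bounds above imply):
 *
 *	BFA_MSIX_CPE_Q0..CPE_Q7  -> bfa_msix_reqq
 *	BFA_MSIX_RME_Q0..RME_Q7  -> bfa_msix_rspq
 *	remaining vectors        -> bfa_msix_lpu_err
 *
 * With a single vector (nvecs == 1) every slot points at bfa_msix_all
 * instead.
 */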

/*
 * Crossbow -- dummy, interrupts are masked
 */
void
bfa_hwcb_msix_install(struct bfa_s *bfa)
{
}

void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
}

/*
 * No special enable/disable -- vector assignments are implicit.
 */
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
	bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
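
/*
 * Note (not from this file): the common IOCFC code is expected to pick
 * the crossbow entry points above for CB-family ASICs and call them
 * through the bfa->iocfc.hwif function table; hw_reqq_ack and
 * hw_rspq_ack are the two fields this file itself repoints in
 * bfa_hwcb_isr_mode_set().  The msix argument is ignored here: the
 * MSIX-style acks are installed unconditionally in this version.
 */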

void
bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
{
	*start = BFA_MSIX_RME_Q0;
	*end = BFA_MSIX_RME_Q7;
}