/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_sriov.h"

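/* return the index of the VF whose absolute FID matches abs_vfid; when no
 * VF matches, the loop index runs one past the last VF, so callers are
 * expected to range-check the result against BNX2X_NR_VIRTFN()
 */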
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

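/* translate an absolute FID to its VF database entry, or NULL when the FID
 * does not belong to any VF of this PF
 */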
static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

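/* report whether ARI forwarding is enabled on the device's upstream bridge
 * (dev->bus->self); the caller treats missing ARI as a reason not to enable
 * SR-IOV
 */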
static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

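/* credit an IGU status block to the VF it is mapped to; the first status
 * block seen for a VF becomes its igu_base_id
 */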
static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

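/* scan the IGU CAM and attribute every valid non-PF entry to its VF */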
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

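/* free the VF database along with its VF and queue arrays */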
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

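/* read the SR-IOV extended capability from PCI configuration space into the
 * driver's private bnx2x_sriov structure
 */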
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

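/* collect the SR-IOV capability fields and derive the absolute number of the
 * first VF that belongs to this PF
 */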
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

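/* sum the status blocks allocated to the VFs; the result is used as an upper
 * bound on the total number of VF queues
 */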
static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

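/* bnx2x_iov_init_one - verify that SR-IOV can be supported on this device
 * and, if so, allocate and populate the VF database (bp->vfdb); the early
 * "return 0" paths below simply leave SR-IOV disabled
 */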
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify the sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify we are a PF */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ARI is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return 0;
	}

	/* verify the IGU is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

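/* teardown counterpart of bnx2x_iov_init_one: release the VF database */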
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}