/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        int idx;

        for_each_vf(bp, idx)
                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
                        break;
        return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
        return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
        if (vf) {
                if (!vf_sb_count(vf))
                        vf->igu_base_id = igu_sb_id;
                ++vf_sb_count(vf);
        }
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
        int sb_id;
        u32 val;
        u8 fid;

        /* IGU in normal mode - read CAM */
        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
                        continue;
                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
                if (!(fid & IGU_FID_ENCODE_IS_PF))
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              (fid & IGU_FID_VF_NUM_MASK));

                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
                   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
        }
}
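
/* Note: each valid IGU CAM entry maps one status block to a function id;
 * entries whose FID does not carry the PF bit belong to VFs, so the scan
 * above collects each VF's first IGU SB (igu_base_id) and its SB count.
 */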

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
        if (bp->vfdb) {
                kfree(bp->vfdb->vfqs);
                kfree(bp->vfdb->vfs);
                kfree(bp->vfdb);
        }
        bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        int pos;
        struct pci_dev *dev = bp->pdev;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                BNX2X_ERR("failed to find SRIOV capability in device\n");
                return -ENODEV;
        }

        iov->pos = pos;
        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        u32 val;

        /* read the SRIOV capability structure
         * The fields can be read via configuration read or
         * directly from the device (starting at offset PCICFG_OFFSET)
         */
        if (bnx2x_sriov_pci_cfg_info(bp, iov))
                return -ENODEV;

        /* get the number of SRIOV bars */
        iov->nres = 0;

        /* read the first_vfid */
        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

        DP(BNX2X_MSG_IOV,
           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
           BP_FUNC(bp),
           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
        int i;
        u8 queue_count = 0;

        if (IS_SRIOV(bp))
                for_each_vf(bp, i)
                        queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

        return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
                       int num_vfs_param)
{
        int err, i, qcount;
        struct bnx2x_sriov *iov;
        struct pci_dev *dev = bp->pdev;

        bp->vfdb = NULL;

        /* verify sriov capability is present in configuration space */
        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
                DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
                return 0;
        }

        /* verify this is a PF */
        if (IS_VF(bp))
                return 0;

        /* verify chip revision */
        if (CHIP_IS_E1x(bp))
                return 0;

        /* check if SRIOV support is turned off */
        if (!num_vfs_param)
                return 0;

        /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
        if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
                BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
                          BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
                return 0;
        }

        /* SRIOV can be enabled only with MSIX */
        if (int_mode_param == BNX2X_INT_MODE_MSI ||
            int_mode_param == BNX2X_INT_MODE_INTX) {
                BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
                return 0;
        }

        /* verify ari is enabled */
        if (!bnx2x_ari_enabled(bp->pdev)) {
                BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
                return 0;
        }

        /* verify igu is in normal mode */
        if (CHIP_INT_MODE_IS_BC(bp)) {
                BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
                return 0;
        }

        /* allocate the vfs database */
        bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
        if (!bp->vfdb) {
                BNX2X_ERR("failed to allocate vf database\n");
                err = -ENOMEM;
                goto failed;
        }

        /* get the sriov info - Linux already collected all the pertinent
         * information, however the sriov structure is for the private use
         * of the pci module. Also we want this information regardless
         * of the hypervisor.
         */
        iov = &(bp->vfdb->sriov);
        err = bnx2x_sriov_info(bp, iov);
        if (err)
                goto failed;

        /* SR-IOV capability was enabled but there are no VFs */
        if (iov->total == 0)
                goto failed;

        /* calculate the actual number of VFs */
        iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

        /* allocate the vf array */
        bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
                                BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
        if (!bp->vfdb->vfs) {
                BNX2X_ERR("failed to allocate vf array\n");
                err = -ENOMEM;
                goto failed;
        }

        /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
        for_each_vf(bp, i) {
                bnx2x_vf(bp, i, index) = i;
                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
                bnx2x_vf(bp, i, state) = VF_FREE;
                INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
                mutex_init(&bnx2x_vf(bp, i, op_mutex));
                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
        }

        /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
        bnx2x_get_vf_igu_cam_info(bp);

        /* get the total queue count and allocate the global queue arrays */
        qcount = bnx2x_iov_get_max_queue_count(bp);

        /* allocate the queue arrays for all VFs */
        bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
                                 GFP_KERNEL);
        if (!bp->vfdb->vfqs) {
                BNX2X_ERR("failed to allocate vf queue array\n");
                err = -ENOMEM;
                goto failed;
        }

        return 0;
failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
        __bnx2x_iov_free_vfdb(bp);
        return err;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
        u32 pretend_reg;

        if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
                return -1;

        /* get my own pretend register */
        pretend_reg = bnx2x_get_pretend_reg(bp);
        REG_WR(bp, pretend_reg, pretend_func_val);
        REG_RD(bp, pretend_reg);
        return 0;
}
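
/* Usage sketch (illustrative only): callers bracket VF-targeted register
 * accesses with a pretend/un-pretend pair, e.g.
 *
 *      bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *      ... GRC accesses issued on behalf of the VF ...
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * as done in bnx2x_vf_enable_access() below.
 */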

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
        u32 was_err_reg = 0;

        switch (was_err_group) {
        case 0:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
                break;
        case 1:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
                break;
        case 2:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
                break;
        case 3:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
                break;
        }
        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
        /* set the VF-PF association in the FW */
        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

        /* clear vf errors */
        bnx2x_vf_semi_clear_err(bp, abs_vfid);
        bnx2x_vf_pglue_clear_err(bp, abs_vfid);

        /* internal vf-enable - pretend */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
        bnx2x_vf_enable_internal(bp, true);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
        struct pci_dev *dev;
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

        if (!vf)
                goto unknown_dev;

        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
        if (dev)
                return bnx2x_is_pcie_pending(dev);

unknown_dev:
        BNX2X_ERR("Unknown device\n");
        return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
        /* Wait 100ms */
        msleep(100);

        /* Verify no pending pci transactions */
        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
                BNX2X_ERR("PCIE Transactions still pending\n");

        return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
        u16 vlan_count = 0;

        /* will be set only during VF-ACQUIRE */
        resc->num_rxqs = 0;
        resc->num_txqs = 0;

        /* no credit calculations for macs (just yet) */
        resc->num_mac_filters = 1;

        /* divvy up vlan rules */
        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
        vlan_count = 1 << ilog2(vlan_count);
        resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

        /* no real limitation */
        resc->num_mc_filters = 0;

        /* num_sbs already set */
}
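
/* Worked example (hypothetical numbers): with a vlan credit pool of 200
 * entries and 8 VFs, vlan_count above is rounded down to the nearest power
 * of two (128) and each VF is statically provisioned 128 / 8 = 16 vlan
 * filter rules.
 */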

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
        if (!IS_SRIOV(bp))
                return;

        /* Set the DQ such that the CID reflects the abs_vfid */
        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

        /* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
         * the PF L2 queues
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

        /* The VF window size is the log2 of the max number of CIDs per VF */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

        /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
         * the PF doorbell size although the two are independent.
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
               BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

        /* No security checks for now -
         * configure single rule (out of 16) mask = 0x1, value = 0x0,
         * CID range 0 - 0x1ffff
         */
        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

        /* set the number of VF allowed doorbells to the full DQ range */
        REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

        /* set the VF doorbell threshold */
        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
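
/* Illustrative note: combined with the VF window size programmed above,
 * each VF's doorbell CIDs form a contiguous window of (max CIDs per VF)
 * entries starting at BNX2X_FIRST_VF_CID; bnx2x_iov_nic_init() below
 * computes the matching base_vf_cid per VF.
 */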

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
        DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
        if (!IS_SRIOV(bp))
                return;

        REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return dev->bus->number + ((dev->devfn + iov->offset +
                                    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
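
/* Worked example (hypothetical values): with the PF at devfn 0x00, a VF
 * offset of 16 and a stride of 1, VF 2's routing ID is 0x00 + 16 + 1 * 2 =
 * 18, i.e. devfn 0x12 on the PF's bus (no carry into the bus number).
 */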

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int i, n;
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
                u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
                u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

                do_div(size, iov->total);
                vf->bars[n].bar = start + size * vf->abs_vfid;
                vf->bars[n].size = size;
        }
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
        int i;

        if (!IS_SRIOV(bp))
                return;

        /* free vfs hw contexts */
        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
                struct hw_dma *cxt = &bp->vfdb->context[i];
                BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
        }

        BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
                       BP_VFDB(bp)->sp_dma.mapping,
                       BP_VFDB(bp)->sp_dma.size);

        BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
                       BP_VF_MBX_DMA(bp)->mapping,
                       BP_VF_MBX_DMA(bp)->size);
}
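
/* Note on the allocation helpers used below: the BNX2X_PCI_ALLOC() macro
 * (see bnx2x_cmn.h) is assumed to branch to the local alloc_mem_err label
 * on failure, which is why bnx2x_iov_alloc_mem() carries that label with
 * no explicit goto in sight.
 */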

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
        size_t tot_size;
        int i, rc = 0;

        if (!IS_SRIOV(bp))
                return rc;

        /* allocate vfs hw contexts */
        tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
                BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
                struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
                cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

                if (cxt->size) {
                        BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
                } else {
                        cxt->addr = NULL;
                        cxt->mapping = 0;
                }
                tot_size -= cxt->size;
        }

        /* allocate vfs ramrods dma memory - client_init and set_mac */
        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
        BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
                        tot_size);
        BP_VFDB(bp)->sp_dma.size = tot_size;

        /* allocate mailboxes */
        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
        BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
                        tot_size);
        BP_VF_MBX_DMA(bp)->size = tot_size;

        return 0;

alloc_mem_err:
        return -ENOMEM;
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
        int vfid, qcount, i;

        if (!IS_SRIOV(bp)) {
                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
                return 0;
        }

        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

        /* initialize vf database */
        for_each_vf(bp, vfid) {
                struct bnx2x_virtf *vf = BP_VF(bp, vfid);

                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
                        BNX2X_CIDS_PER_VF;

                union cdu_context *base_cxt = (union cdu_context *)
                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
                        (base_vf_cid & (ILT_PAGE_CIDS-1));

                DP(BNX2X_MSG_IOV,
                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

                /* init statically provisioned resources */
                bnx2x_iov_static_resc(bp, &vf->alloc_resc);

                /* queues are initialized during VF-ACQUIRE */

                /* reserve the vf vlan credit */
                bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

                vf->filter_state = 0;
                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

                /* init mcast object - This object will be re-initialized
                 * during VF-ACQUIRE with the proper cl_id and cid.
                 * It needs to be initialized here so that it can be safely
                 * handled by a subsequent FLR flow.
                 */
                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
                                     0xFF, 0xFF, 0xFF,
                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
                                     BNX2X_FILTER_MCAST_PENDING,
                                     &vf->filter_state,
                                     BNX2X_OBJ_TYPE_RX_TX);

                /* set the mailbox message addresses */
                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
                        (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
                        MBX_MSG_ALIGNED_SIZE);

                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
                        vfid * MBX_MSG_ALIGNED_SIZE;

                /* Enable vf mailbox */
                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
        }

        /* Final VF init */
        qcount = 0;
        for_each_vf(bp, i) {
                struct bnx2x_virtf *vf = BP_VF(bp, i);

                /* fill in the BDF and bars */
                vf->bus = bnx2x_vf_bus(bp, i);
                vf->devfn = bnx2x_vf_devfn(bp, i);
                bnx2x_vf_set_bars(bp, vf);

                DP(BNX2X_MSG_IOV,
                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
                   vf->abs_vfid, vf->bus, vf->devfn,
                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
                   (unsigned)vf->bars[2].bar, vf->bars[2].size);

                /* set local queue arrays */
                vf->vfqs = &bp->vfdb->vfqs[qcount];
                qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
        }

        return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
        int i;
        struct bnx2x_ilt *ilt = BP_ILT(bp);

        if (!IS_SRIOV(bp))
                return line;

        /* set vfs ilt lines */
        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

                ilt->lines[line+i].page = hw_cxt->addr;
                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
        }
        return line + i;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
        /* if SRIOV is not enabled there's nothing to do */
        if (!IS_SRIOV(bp))
                return;

        /* free vf database */
        __bnx2x_iov_free_vfdb(bp);
}