bnx2x_sriov.c

/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
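
/* Illustrative arithmetic for the first_vf_in_pf computation above; the
 * concrete values are assumptions chosen only for the example, not read from
 * hardware. If the masked GRC field reads 9, the * 8 scaling gives 72 as the
 * device-wide first VF; with BNX2X_MAX_NUM_OF_VFS == 64 (see the "max number
 * of VFs (per path) is 64" comments later in this file) and BP_PATH(bp) == 1,
 * first_vf_in_pf = 72 - 64 * 1 = 8, i.e. this PF's VFs start at the 8th VF
 * of its path.
 */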
static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
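
/* Minimal sketch of the intended pretend usage, for clarity only (see
 * bnx2x_vf_enable_access() below for the real caller of this pattern):
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	... program GRC registers on behalf of the VF ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * i.e. switch the pretend identity to the VF's PF-num:VF-valid:ABS-VFID
 * handle, do the work, then always restore the PF's own absolute function.
 */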
/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. this routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}
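
/* Worked example of the vlan divvy above; the numbers are illustrative
 * assumptions, not hardware values. If the vlans pool reports 96 free
 * credits and 8 VFs are configured, 1 << ilog2(96) rounds the pool down to
 * the power of two 64, so each VF is statically granted 64 / 8 = 8 vlan
 * filters.
 */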
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);

		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;
	return 0;

alloc_mem_err:
	return -ENOMEM;
}
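
/* Note on the flow above: the otherwise-unreferenced alloc_mem_err label is
 * the failure target of the BNX2X_PCI_ALLOC() macro, which (as defined in
 * bnx2x.h) jumps there when a DMA allocation fails. The sizing loop splits
 * one logical buffer of (first_vf_in_pf + num VFs) * BNX2X_CIDS_PER_VF CDU
 * contexts across ILT pages of at most CDU_ILT_PAGE_SZ bytes each; pages
 * past the end of the buffer get a NULL address and zero size.
 */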
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}
/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}

	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;
	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}

	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
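
/* Illustration of the CID layout assumed by the decompositions above; the
 * window width used here is an assumption chosen purely for the example.
 * With BNX2X_VF_CID_WND == 4, each VF owns a window of 16 consecutive CIDs,
 * so a cid value of 0x25 yields queue index 0x25 & 0xf = 5 and
 * abs_vfid (0x25 >> 4) & 63 = 2.
 */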
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}

		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the recorded op */
	vf->op_current = CHANNEL_TLV_NONE;
}
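
/* Sketch of the intended pairing of the two helpers above (illustrative
 * only; CHANNEL_TLV_ACQUIRE stands in for whichever TLV the caller is
 * actually processing):
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);
 *	... serialize and handle the VF's request over the VF-PF channel ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);
 *
 * The unlock WARNs if the TLV passed in does not match the one recorded by
 * the lock, which catches mismatched lock/unlock pairs.
 */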