bnx2x_sriov.c

/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
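
/* Look up the index of a VF in the PF's VF array by its absolute VF id.
 * If no VF matches, the loop runs to completion and the returned index
 * equals BNX2X_NR_VIRTFN(bp); callers such as bnx2x_vf_by_abs_fid() below
 * treat that value as "not found".
 */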
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);

	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}
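
/* Scan the IGU CAM (the chip's interrupt mapping memory) and, for every
 * valid entry that belongs to a VF rather than to a PF, record that VF's
 * base IGU status-block id and count how many status blocks it owns.
 * Must run after each VF's abs_vfid has been set in the VF database.
 */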
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}
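
/* Cache the PF's SR-IOV extended capability: control word, total and
 * initial VF counts, first VF offset, VF stride, supported page sizes and
 * the function-dependency link, all read from PCI configuration space.
 */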
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}
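
/* Probe-time SR-IOV setup. Verifies that SR-IOV can actually be used on
 * this function (SR-IOV capability present, running as a PF on an E2+ chip,
 * MSI-X interrupts, ARI forwarding, IGU in normal mode and PF CIDs below
 * the first VF CID) and then allocates and seeds the VF database: the sriov
 * info, the per-VF array and the global VF queue array.
 */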
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
		return 0;
	}

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0) {
		err = -EINVAL;
		goto failed;
	}

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}
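
/* Typical pretend sequence, as used by bnx2x_vf_enable_access() below:
 * write the VF's HW handle to the PF's pretend register so subsequent GRC
 * accesses are issued on behalf of that VF, perform the accesses, then
 * restore the PF's own function number to un-pretend.
 */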
/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. this routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
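
/* Clear the VF's "was error" indication in the PGLUE block. Each
 * PGLUE_B_REG_WAS_ERROR_VF_*_CLR register covers 32 VFs, so the register is
 * selected from the absolute VF id (offset by the path) and the VF's bit
 * within it is written to clear the error.
 */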
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}
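
/* Program the doorbell queue (DQ) block for SR-IOV: VF doorbells are mapped
 * by normalized CID, VF CIDs start at BNX2X_FIRST_VF_CID (everything below
 * that belongs to the PF L2 queues), and each VF owns a window of
 * 2^BNX2X_VF_CID_WND CIDs. The VF type checks are left wide open for now.
 */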
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
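
/* Derive a VF's PCI bus number and devfn from the PF's routing ID and the
 * SR-IOV First VF Offset / VF Stride fields, per the SR-IOV spec:
 * routing id of VF n = PF routing id + offset + stride * n.
 */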
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
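
/* Compute the VF's BAR addresses: each SR-IOV BAR on the PF is an array of
 * equally sized per-VF slices, so a VF's slice starts at
 * pci_resource_start() + (resource length / total VFs) * abs_vfid.
 */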
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}
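
/* Allocate the DMA memory that backs all VFs: the CDU context pages that
 * will be mapped through the ILT for the VF CIDs, one bnx2x_vf_sp slow-path
 * ramrod buffer per VF, and one aligned VF-PF mailbox per VF. Allocation
 * failures are handled by the BNX2X_PCI_ALLOC macro, which bails out to the
 * alloc_mem_err label below.
 */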
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
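
/* Per-VF setup done at nic load time: assign each VF its base CID and CDU
 * context, static resources and vlan credit, init its mcast object, hook up
 * its mailbox addresses and enable the mailbox, then fill in the VF's BDF
 * and BAR slices and point it at its share of the global queue array.
 */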
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
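
/* Completion handler for MAC/VLAN classification ramrods issued on behalf
 * of a VF queue: hand the event ring element back to the queue's mac_obj or
 * vlan_obj so the pending command is completed and, with RAMROD_CONT set,
 * the next pending command is pushed out immediately.
 */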
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
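
/* PF event queue dispatcher for VF-related events. Returns 1 when the
 * element is not a VF event (or SR-IOV is off) so the caller keeps handling
 * it, and 0 once the event has been consumed here. The owning VF is found
 * either from the VF id carried by FLR/malicious-VF events or by decoding
 * the CID of cfc-delete and classification completions.
 */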
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;
	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}

	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
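
/* Slow-path scheduling pair: bnx2x_iov_sp_event() marks the VF that owns
 * vf_cid as having an operation in progress and, if requested, schedules
 * the PF slow-path task; bnx2x_iov_sp_task() then runs from that task and
 * drives every VF with a pending operation by invoking the transition of
 * the VF's current vfop.
 */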
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}