/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
		   u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}
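
/* Post a prepared request to the PF and poll for its response: the request's
 * DMA address is written to the CSDM VF zone, the channel is triggered, and
 * the 'done' byte (filled in by the PF via DMAE) is polled for up to ten
 * seconds. If the PF's bulletin board flags the channel as down, the send is
 * skipped and success is returned so the calling flow can continue.
 */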
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return 0;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
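
/* Recover this VF's id: VF reads of the doorbell BAR are trapped by the PXP,
 * which returns the ME register value instead; the VF number is extracted
 * from it. Polls for up to a second in case the PF driver is not up yet.
 */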
static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}
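
/* ACQUIRE: the first request a VF sends. Asks the PF for resources (queues,
 * status blocks, filters); if the PF cannot satisfy the request, the request
 * is "humbled" to the PF's recommended amounts and retried, up to
 * VF_ACQUIRE_THRESH attempts. On success, HW info from the response is
 * copied into the bp structure.
 */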
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				bp->acquire_resp.resc.num_txqs;
			req->resc_request.num_rxqs =
				bp->acquire_resp.resc.num_rxqs;
			req->resc_request.num_sbs =
				bp->acquire_resp.resc.num_sbs;
			req->resc_request.num_mac_filters =
				bp->acquire_resp.resc.num_mac_filters;
			req->resc_request.num_vlan_filters =
				bp->acquire_resp.resc.num_vlan_filters;
			req->resc_request.num_mc_filters =
				bp->acquire_resp.resc.num_mc_filters;

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = 1;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
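
/* RELEASE: counterpart of ACQUIRE - tells the PF this VF is going away so
 * its resources can be reclaimed.
 */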
int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u32 vf_id;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		/* PF timeout */
		goto out;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - the request supports only a single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no point in
	 * continuing to send messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
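
/* TEARDOWN_Q: ask the PF to close a previously set up queue */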
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from bulletin to device */
			bulletin = bp->pf2vf_bulletin->content;
			memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
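
/* hand the net device's multicast list to the PF over the channel */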
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops; check
	 * the bound before copying so req->multicast[] cannot overflow
	 */
	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
		DP(NETIF_MSG_IFUP,
		   "VF supports not more than %d multicast MAC addresses\n",
		   PFVF_MAX_MULTICAST_PER_VF);
		rc = -EINVAL;
		goto out;
	}

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
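
/* request pf to configure the vf's rx mode: translate bp->rx_mode into
 * VFPF_RX_MASK_* accept flags and send them as a SET_Q_FILTERS request
 */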
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
		break;
	case BNX2X_RX_MODE_NORMAL:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_PROMISC:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		rc = -EINVAL;
		goto out;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
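
/* PF side: DMA the prepared response back to the VF's mailbox. The body (if
 * any) is copied first; the header, whose status/done field the VF polls, is
 * copied last, after the FW ack, so the VF never observes a partially
 * written response.
 */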
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* copy the response body, if there is one, before the header, as the vf
	 * is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		length = resp->hdr.tl.length - sizeof(u64);
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			goto mbx_error;
		}
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	bnx2x_vf_release(bp, vf, false); /* non blocking */
}
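
/* PF side: build the ACQUIRE response - fill in pfdev info and either the
 * allocated resources (on success) or the maximum available amounts (on
 * NO_RESOURCE, so the VF can retry with a humbler request).
 */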
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdev info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
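
/* PF side: handle a SETUP_Q request - translate the VF's queue parameters
 * into standard queue init/setup params and kick off the queue constructor.
 */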
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		/* re-init the VF operation context */
		memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}

enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
	BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
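
/* Build a driver-internal filter list from the MAC/VLAN filters of a
 * SET_Q_FILTERS tlv, keeping only the entries matching type_flag. Returns
 * -ENOMEM on allocation failure; *pfl is left untouched if no entry matched.
 */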
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vfop_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vfop_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
		sizeof(struct bnx2x_vfop_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	INIT_LIST_HEAD(&fl->head);

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		list_add_tail(&fl->filters[j++].link, &fl->head);
	}
	if (list_empty(&fl->head))
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
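
/* State machine for applying a SET_Q_FILTERS request: walks through MAC
 * filters, VLAN filters, rx-mode and multicast in turn. Each asynchronous
 * sub-command re-enters this function (via cmd.done) in the next state.
 */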
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;

	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;

			/* convert VF-PF rx_mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

			/* A packet arriving at the vf's mac should be accepted
			 * with any vlan
			 */
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;

	default:
		bnx2x_vfop_default(state);
	}
}

static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
				 bnx2x_vfop_mbx_qfilters, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
					     cmd->block);
	}
	return -ENOMEM;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * Because the PF may have been unable to configure the mac at the
	 * time, since the queue was not set up yet.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		goto response;
	return;

response:
	bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			break;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * - just ack the FW to release the mailbox and unlock
			 * the channel.
			 */
			storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
			mmiowb();
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}

/* handle new vf-pf message */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
	struct bnx2x_virtf *vf;
	struct bnx2x_vf_mbx *mbx;
	u8 vf_idx;
	int rc;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		goto mbx_done;
	}
	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
	mbx = BP_VF_MBX(bp, vf_idx);

	/* verify an event is not currently being processed -
	 * debug failsafe only
	 */
	if (mbx->flags & VF_MSG_INPROCESS) {
		BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
			  vfpf_event->vf_id);
		goto mbx_done;
	}
	vf = BP_VF(bp, vf_idx);

	/* save the VF message address */
	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* dmae to get the VF request */
	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
				  mbx->vf_addr_hi, mbx->vf_addr_lo,
				  sizeof(union vfpf_tlvs)/4);
	if (rc) {
		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
		goto mbx_error;
	}

	/* process the VF message header */
	mbx->first_tlv = mbx->msg->req.first_tlv;

	/* dispatch the request (will prepare the response) */
	bnx2x_vf_mbx_request(bp, vf, mbx);
	goto mbx_done;

mbx_error:
	bnx2x_vf_release(bp, vf, false); /* non blocking */
mbx_done:
	return;
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

	/* propagate bulletin board via dmae to vf memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}