bnx2x_vfpf.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701
  1. /* bnx2x_vfpf.c: Broadcom Everest network driver.
  2. *
  3. * Copyright 2009-2013 Broadcom Corporation
  4. *
  5. * Unless you and Broadcom execute a separate written software license
  6. * agreement governing use of this software, this software is licensed to you
  7. * under the terms of the GNU General Public License version 2, available
  8. * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  9. *
  10. * Notwithstanding the above, under no circumstances may you combine this
  11. * software in any way with any other Broadcom software provided under a
  12. * license other than the GPL, without Broadcom's express prior written
  13. * consent.
  14. *
  15. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  16. * Written by: Shmulik Ravid <shmulikr@broadcom.com>
  17. * Ariel Elior <ariele@broadcom.com>
  18. */
  19. #include "bnx2x.h"
  20. #include "bnx2x_cmn.h"
  21. #include <linux/crc32.h>
  22. /* place a given tlv on the tlv buffer at a given offset */
  23. void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
  24. u16 length)
  25. {
  26. struct channel_tlv *tl =
  27. (struct channel_tlv *)(tlvs_list + offset);
  28. tl->type = type;
  29. tl->length = length;
  30. }
  31. /* Clear the mailbox and init the header of the first tlv */
/* Clear the mailbox and init the header of the first tlv.
 *
 * NOTE: takes bp->vf2pf_mutex and leaves it held; every caller must
 * eventually call bnx2x_vfpf_finalize() to release it.
 */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	/* serialize all access to the single vf2pf mailbox */
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header: tell the PF where, relative to the request,
	 * it should place its response
	 */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
  45. /* releases the mailbox */
/* releases the mailbox; pairs with the mutex_lock() in bnx2x_vfpf_prep() */
void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}
  52. /* list the types and lengths of the tlvs on the buffer */
  53. void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
  54. {
  55. int i = 1;
  56. struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
  57. while (tlv->type != CHANNEL_TLV_LIST_END) {
  58. /* output tlv */
  59. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  60. tlv->type, tlv->length);
  61. /* advance to next tlv */
  62. tlvs_list += tlv->length;
  63. /* cast general tlv list pointer to channel tlv header*/
  64. tlv = (struct channel_tlv *)tlvs_list;
  65. i++;
  66. /* break condition for this loop */
  67. if (i > MAX_TLVS_IN_LIST) {
  68. WARN(true, "corrupt tlvs");
  69. return;
  70. }
  71. }
  72. /* output last tlv */
  73. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  74. tlv->type, tlv->length);
  75. }
  76. /* test whether we support a tlv type */
  77. bool bnx2x_tlv_supported(u16 tlvtype)
  78. {
  79. return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
  80. }
  81. static inline int bnx2x_pfvf_status_codes(int rc)
  82. {
  83. switch (rc) {
  84. case 0:
  85. return PFVF_STATUS_SUCCESS;
  86. case -ENOMEM:
  87. return PFVF_STATUS_NO_RESOURCE;
  88. default:
  89. return PFVF_STATUS_FAILURE;
  90. }
  91. }
/* Post the request at @msg_mapping to the PF through the cstorm zone and
 * poll until the PF sets *done (the response status byte).
 *
 * Returns 0 on success, -EINVAL if *done was already non-zero (protocol
 * violation), -EAGAIN if the PF never answered within the timeout.
 */
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 600, interval = 100; /* wait for 60 seconds */

	/* *done must start cleared; the PF writes it when it answers */
	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
  127. static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
  128. {
  129. u32 me_reg;
  130. int tout = 10, interval = 100; /* Wait for 1 sec */
  131. do {
  132. /* pxp traps vf read of doorbells and returns me reg value */
  133. me_reg = readl(bp->doorbells);
  134. if (GOOD_ME_REG(me_reg))
  135. break;
  136. msleep(interval);
  137. BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
  138. me_reg);
  139. } while (tout-- > 0);
  140. if (!GOOD_ME_REG(me_reg)) {
  141. BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
  142. return -EINVAL;
  143. }
  144. BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
  145. *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
  146. return 0;
  147. }
/* Negotiate resources (queues, SBs, filters) with the PF. If the PF cannot
 * grant the requested amounts, retry with the PF's recommended amounts up
 * to VF_ACQUIRE_THRESH times. On success, copies the PF-provided HW info
 * (chip id, doorbell size, fw version, MAC) into bp.
 *
 * Returns 0 on success, -EAGAIN on channel/negotiation failure.
 */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	/* initial resource request */
	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request: retry with the amounts the PF
			 * reported it can actually provide
			 */
			req->resc_request.num_txqs =
				bp->acquire_resp.resc.num_txqs;
			req->resc_request.num_rxqs =
				bp->acquire_resp.resc.num_rxqs;
			req->resc_request.num_sbs =
				bp->acquire_resp.resc.num_sbs;
			req->resc_request.num_mac_filters =
				bp->acquire_resp.resc.num_mac_filters;
			req->resc_request.num_vlan_filters =
				bp->acquire_resp.resc.num_vlan_filters;
			req->resc_request.num_mc_filters =
				bp->acquire_resp.resc.num_mc_filters;

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* get HW info from the PF's response */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	/* VFs have no WOL/iSCSI/FCoE capabilities */
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = 1;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	/* adopt the MAC the PF assigned us, if it provided a valid one */
	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
  246. int bnx2x_vfpf_release(struct bnx2x *bp)
  247. {
  248. struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
  249. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  250. u32 rc, vf_id;
  251. /* clear mailbox and prep first tlv */
  252. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
  253. if (bnx2x_get_vf_id(bp, &vf_id)) {
  254. rc = -EAGAIN;
  255. goto out;
  256. }
  257. req->vf_id = vf_id;
  258. /* add list termination tlv */
  259. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  260. sizeof(struct channel_list_end_tlv));
  261. /* output tlvs list */
  262. bnx2x_dp_tlv_list(bp, req);
  263. /* send release request */
  264. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  265. if (rc)
  266. /* PF timeout */
  267. goto out;
  268. if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
  269. /* PF released us */
  270. DP(BNX2X_MSG_SP, "vf released\n");
  271. } else {
  272. /* PF reports error */
  273. BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
  274. resp->hdr.status);
  275. rc = -EAGAIN;
  276. goto out;
  277. }
  278. out:
  279. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  280. return rc;
  281. }
  282. /* Tell PF about SB addresses */
/* Tell PF about SB addresses: send the per-queue status block DMA addresses
 * and the statistics buffer address over the vf-pf channel.
 *
 * Returns 0 on success, negative errno on channel failure or PF rejection.
 */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - requests only supports single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
  316. /* CLOSE VF - opposite to INIT_VF */
/* CLOSE VF - opposite to INIT_VF.
 *
 * Best-effort teardown: queues are torn down, the MAC filter removed and a
 * CLOSE message sent; channel errors are only logged. Interrupts, NAPI and
 * IRQs are released unconditionally, even when no valid VF id is available.
 */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* errors here are logged only - close proceeds regardless */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}
  356. /* ask the pf to open a queue for the vf */
/* ask the pf to open a queue for the vf: builds a SETUP_Q request carrying
 * the Rx/Tx ring DMA addresses, TPA configuration and coalescing parameters
 * for fastpath @fp_idx.
 *
 * Returns 0 on success, -EINVAL if the PF rejected the request.
 */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* interrupt coalescing: ticks (usec) -> events per second */
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	/* max SGE pages per packet, rounded up to a PAGES_PER_SGE multiple */
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	/* NOTE(review): resp->hdr.status is checked even when the send timed
	 * out (rc != 0); the mailbox was zeroed in prep, so a timeout also
	 * lands here and rc becomes -EINVAL rather than the send's rc.
	 */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
  425. int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
  426. {
  427. struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
  428. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  429. int rc;
  430. /* clear mailbox and prep first tlv */
  431. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
  432. sizeof(*req));
  433. req->vf_qid = qidx;
  434. /* add list termination tlv */
  435. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  436. sizeof(struct channel_list_end_tlv));
  437. /* output tlvs list */
  438. bnx2x_dp_tlv_list(bp, req);
  439. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  440. if (rc) {
  441. BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
  442. rc);
  443. goto out;
  444. }
  445. /* PF failed the transaction */
  446. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  447. BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
  448. resp->hdr.status);
  449. rc = -EINVAL;
  450. }
  451. out:
  452. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  453. return rc;
  454. }
  455. /* request pf to add a mac for the vf */
  456. int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
  457. {
  458. struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
  459. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  460. struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
  461. int rc = 0;
  462. /* clear mailbox and prep first tlv */
  463. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
  464. sizeof(*req));
  465. req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
  466. req->vf_qid = vf_qid;
  467. req->n_mac_vlan_filters = 1;
  468. req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
  469. if (set)
  470. req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
  471. /* sample bulletin board for new mac */
  472. bnx2x_sample_bulletin(bp);
  473. /* copy mac from device to request */
  474. memcpy(req->filters[0].mac, addr, ETH_ALEN);
  475. /* add list termination tlv */
  476. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  477. sizeof(struct channel_list_end_tlv));
  478. /* output tlvs list */
  479. bnx2x_dp_tlv_list(bp, req);
  480. /* send message to pf */
  481. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  482. if (rc) {
  483. BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
  484. goto out;
  485. }
  486. /* failure may mean PF was configured with a new mac for us */
  487. while (resp->hdr.status == PFVF_STATUS_FAILURE) {
  488. DP(BNX2X_MSG_IOV,
  489. "vfpf SET MAC failed. Check bulletin board for new posts\n");
  490. /* copy mac from bulletin to device */
  491. memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
  492. /* check if bulletin board was updated */
  493. if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
  494. /* copy mac from device to request */
  495. memcpy(req->filters[0].mac, bp->dev->dev_addr,
  496. ETH_ALEN);
  497. /* send message to pf */
  498. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
  499. bp->vf2pf_mbox_mapping);
  500. } else {
  501. /* no new info in bulletin */
  502. break;
  503. }
  504. }
  505. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  506. BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
  507. rc = -EINVAL;
  508. }
  509. out:
  510. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  511. return 0;
  512. }
  513. int bnx2x_vfpf_set_mcast(struct net_device *dev)
  514. {
  515. struct bnx2x *bp = netdev_priv(dev);
  516. struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
  517. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  518. int rc, i = 0;
  519. struct netdev_hw_addr *ha;
  520. if (bp->state != BNX2X_STATE_OPEN) {
  521. DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
  522. return -EINVAL;
  523. }
  524. /* clear mailbox and prep first tlv */
  525. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
  526. sizeof(*req));
  527. /* Get Rx mode requested */
  528. DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
  529. netdev_for_each_mc_addr(ha, dev) {
  530. DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
  531. bnx2x_mc_addr(ha));
  532. memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
  533. i++;
  534. }
  535. /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
  536. * addresses tops
  537. */
  538. if (i >= PFVF_MAX_MULTICAST_PER_VF) {
  539. DP(NETIF_MSG_IFUP,
  540. "VF supports not more than %d multicast MAC addresses\n",
  541. PFVF_MAX_MULTICAST_PER_VF);
  542. return -EINVAL;
  543. }
  544. req->n_multicast = i;
  545. req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
  546. req->vf_qid = 0;
  547. /* add list termination tlv */
  548. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  549. sizeof(struct channel_list_end_tlv));
  550. /* output tlvs list */
  551. bnx2x_dp_tlv_list(bp, req);
  552. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  553. if (rc) {
  554. BNX2X_ERR("Sending a message failed: %d\n", rc);
  555. goto out;
  556. }
  557. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  558. BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
  559. resp->hdr.status);
  560. rc = -EINVAL;
  561. }
  562. out:
  563. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  564. return 0;
  565. }
/* Translate bp->rx_mode into a PF/VF accept mask and send it to the PF.
 *
 * Returns 0 on success, -EINVAL for an unknown mode or PF rejection.
 */
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	/* map the driver rx mode to the channel's accept mask bits */
	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
		break;
	case BNX2X_RX_MODE_NORMAL:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_PROMISC:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		rc = -EINVAL;
		goto out;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	/* NOTE(review): status is checked even after a send failure; a
	 * timeout therefore surfaces as -EINVAL rather than the send's rc.
	 */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
  618. /* General service functions */
  619. static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
  620. {
  621. u32 addr = BAR_CSTRORM_INTMEM +
  622. CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
  623. REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
  624. }
  625. static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
  626. {
  627. u32 addr = BAR_CSTRORM_INTMEM +
  628. CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
  629. REG_WR8(bp, addr, 1);
  630. }
  631. static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
  632. {
  633. int i;
  634. for_each_vf(bp, i)
  635. storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
  636. }
  637. /* enable vf_pf mailbox (aka vf-pf-chanell) */
/* enable vf_pf mailbox (aka vf-pf-channel): finish any pending FLR cleanup,
 * then mark the channel ready/valid in FW before opening VF access.
 */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}
  647. /* this works only on !E1h */
/* this works only on !E1h.
 *
 * DMA-copy @len32 dwords between PF memory (@pf_addr) and VF memory
 * (@vf_addr_hi:@vf_addr_lo); @from_vf selects the direction (true = VF->PF).
 *
 * Returns 0 on success, DMAE_NOT_RDY when the chip/DMAE cannot do the copy,
 * or the completion status from bnx2x_issue_dmae_with_comp().
 */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		/* VF is the source, PF the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		/* PF is the source, VF the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
/* Build and DMA the PF's response to the VF's last request.
 * The status code is taken from vf->op_rc.  The response body (if any) is
 * copied to the VF before the header, because the VF polls the header's
 * status field to detect completion; the header copy is the last DMAE and
 * is only issued after the FW channel was acked.  On DMAE failure the VF
 * is released (non-blocking).
 */
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
	/* prepare response: ACQUIRE gets the larger acquire-resp TLV,
	 * everything else gets a general response TLV
	 */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);
	/* copy the response body, if there is one, before the header, as the vf
	 * is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		/* skip the first 8 bytes (the header) in both buffers */
		length = resp->hdr.tl.length - sizeof(u64);
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			goto mbx_error;
		}
		/* rewind to the header for the final copy */
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}
	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();
	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;
	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);
	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;
mbx_error:
	bnx2x_vf_release(bp, vf, false); /* non blocking */
}
/* Fill in and send the response to a VF ACQUIRE request.
 * Reports PF/chip capabilities and the resources granted to the VF.  When
 * @vfop_status maps to NO_RESOURCE the reported numbers are the maximum
 * possible, so the VF can retry with a smaller request.  On SUCCESS the
 * actual allocated queue/sb ids are filled in, and the MAC from the
 * bulletin board is supplied if one was configured via the set-vf-mac ndo.
 */
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);
	memset(resp, 0, sizeof(*resp));
	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));
	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;
		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));
			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}
			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}
	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);
	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");
	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}
  828. static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
  829. struct bnx2x_vf_mbx *mbx)
  830. {
  831. int rc;
  832. struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
  833. /* log vfdef info */
  834. DP(BNX2X_MSG_IOV,
  835. "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
  836. vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
  837. acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
  838. acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
  839. acquire->resc_request.num_vlan_filters,
  840. acquire->resc_request.num_mc_filters);
  841. /* acquire the resources */
  842. rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
  843. /* store address of vf's bulletin board */
  844. vf->bulletin_map = acquire->bulletin_addr;
  845. /* response */
  846. bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
  847. }
  848. static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
  849. struct bnx2x_vf_mbx *mbx)
  850. {
  851. struct vfpf_init_tlv *init = &mbx->msg->req.init;
  852. /* record ghost addresses from vf message */
  853. vf->spq_map = init->spq_addr;
  854. vf->fw_stat_map = init->stats_addr;
  855. vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
  856. /* response */
  857. bnx2x_vf_mbx_resp(bp, vf);
  858. }
  859. /* convert MBX queue-flags to standard SP queue-flags */
  860. static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
  861. unsigned long *sp_q_flags)
  862. {
  863. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
  864. __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
  865. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
  866. __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
  867. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
  868. __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
  869. if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
  870. __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
  871. if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
  872. __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
  873. if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
  874. __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
  875. if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
  876. __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
  877. if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
  878. __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
  879. /* outer vlan removal is set according to the PF's multi fuction mode */
  880. if (IS_MF_SD(bp))
  881. __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
  882. }
/* Handle a VF SETUP_Q request: validate the queue id, translate the
 * VF-supplied tx/rx queue parameters into the PF's queue-constructor
 * parameters, and launch the (non-blocking) queue setup operation whose
 * completion sends the mailbox response.  On validation or launch failure
 * the response is sent immediately with vf->op_rc as status.
 */
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};
	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}
	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;
		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;
		/* reinit the VF operation context */
		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;
		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;
			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;
			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);
			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);
			/* tx setup - general, nothing */
			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;
			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;
			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;
			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);
			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);
			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;
			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}
/* States of the q-filters configuration state machine driven by
 * bnx2x_vfop_mbx_qfilters(): MAC filters, then VLAN filters, then the
 * rx-mode mask, then multicast filters, then done.
 */
enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
	BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
/* Build a bnx2x_vfop_filters list from the mac/vlan filters in @tlv whose
 * flags contain @type_flag (VFPF_Q_FILTER_DEST_MAC_VALID or
 * VFPF_Q_FILTER_VLAN_TAG_VALID).  On success, if at least one filter
 * matched, *pfl points to a kzalloc()ed list the caller must free;
 * otherwise *pfl is left untouched.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vfop_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vfop_filters *fl = NULL;
	size_t fsz;
	/* allocate the container plus one entry per filter in the tlv
	 * (an upper bound - not every filter may match type_flag)
	 */
	fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
		sizeof(struct bnx2x_vfop_filters);
	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;
	INIT_LIST_HEAD(&fl->head);
	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		list_add_tail(&fl->filters[j++].link, &fl->head);
	}
	/* nothing matched - don't hand an empty list to the caller */
	if (list_empty(&fl->head))
		kfree(fl);
	else
		*pfl = fl;
	return 0;
}
  1022. static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
  1023. struct vfpf_q_mac_vlan_filter *filter)
  1024. {
  1025. DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
  1026. if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
  1027. DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
  1028. if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
  1029. DP_CONT(msglvl, ", MAC=%pM", filter->mac);
  1030. DP_CONT(msglvl, "\n");
  1031. }
  1032. static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
  1033. struct vfpf_set_q_filters_tlv *filters)
  1034. {
  1035. int i;
  1036. if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
  1037. for (i = 0; i < filters->n_mac_vlan_filters; i++)
  1038. bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
  1039. &filters->filters[i]);
  1040. if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
  1041. DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
  1042. if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
  1043. for (i = 0; i < filters->n_multicast; i++)
  1044. DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
  1045. }
/* shorthand aliases for the filter-type flags passed to
 * bnx2x_vf_mbx_macvlan_list()
 */
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
/* State machine applying a VF's SET_Q_FILTERS request.
 * Each state launches one asynchronous sub-operation (mac list, vlan
 * list, rx-mode, mcast) with itself as the completion callback, then
 * returns; when a state has nothing to configure it falls through to the
 * next.  On any error the op terminates via op_err -> op_done.
 */
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;
	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,
		.block = false,
	};
	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
	if (vfop->rc < 0)
		goto op_err;
	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;
			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */
	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;
			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */
	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;
			/* covert VF-PF if mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
			/* A packet arriving the vf's mac should be accepted
			 * with any vlan
			 */
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */
	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
}
  1168. static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
  1169. struct bnx2x_virtf *vf,
  1170. struct bnx2x_vfop_cmd *cmd)
  1171. {
  1172. struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
  1173. if (vfop) {
  1174. bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
  1175. bnx2x_vfop_mbx_qfilters, cmd->done);
  1176. return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
  1177. cmd->block);
  1178. }
  1179. return -ENOMEM;
  1180. }
  1181. static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
  1182. struct bnx2x_virtf *vf,
  1183. struct bnx2x_vf_mbx *mbx)
  1184. {
  1185. struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
  1186. struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
  1187. struct bnx2x_vfop_cmd cmd = {
  1188. .done = bnx2x_vf_mbx_resp,
  1189. .block = false,
  1190. };
  1191. /* if a mac was already set for this VF via the set vf mac ndo, we only
  1192. * accept mac configurations of that mac. Why accept them at all?
  1193. * because PF may have been unable to configure the mac at the time
  1194. * since queue was not set up.
  1195. */
  1196. if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
  1197. /* once a mac was set by ndo can only accept a single mac... */
  1198. if (filters->n_mac_vlan_filters > 1) {
  1199. BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
  1200. vf->abs_vfid);
  1201. vf->op_rc = -EPERM;
  1202. goto response;
  1203. }
  1204. /* ...and only the mac set by the ndo */
  1205. if (filters->n_mac_vlan_filters == 1 &&
  1206. memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
  1207. BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
  1208. vf->abs_vfid);
  1209. vf->op_rc = -EPERM;
  1210. goto response;
  1211. }
  1212. }
  1213. /* verify vf_qid */
  1214. if (filters->vf_qid > vf_rxq_count(vf))
  1215. goto response;
  1216. DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
  1217. vf->abs_vfid,
  1218. filters->vf_qid);
  1219. /* print q_filter message */
  1220. bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
  1221. vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
  1222. if (vf->op_rc)
  1223. goto response;
  1224. return;
  1225. response:
  1226. bnx2x_vf_mbx_resp(bp, vf);
  1227. }
  1228. static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1229. struct bnx2x_vf_mbx *mbx)
  1230. {
  1231. int qid = mbx->msg->req.q_op.vf_qid;
  1232. struct bnx2x_vfop_cmd cmd = {
  1233. .done = bnx2x_vf_mbx_resp,
  1234. .block = false,
  1235. };
  1236. DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
  1237. vf->abs_vfid, qid);
  1238. vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
  1239. if (vf->op_rc)
  1240. bnx2x_vf_mbx_resp(bp, vf);
  1241. }
  1242. static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1243. struct bnx2x_vf_mbx *mbx)
  1244. {
  1245. struct bnx2x_vfop_cmd cmd = {
  1246. .done = bnx2x_vf_mbx_resp,
  1247. .block = false,
  1248. };
  1249. DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
  1250. vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
  1251. if (vf->op_rc)
  1252. bnx2x_vf_mbx_resp(bp, vf);
  1253. }
  1254. static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1255. struct bnx2x_vf_mbx *mbx)
  1256. {
  1257. struct bnx2x_vfop_cmd cmd = {
  1258. .done = bnx2x_vf_mbx_resp,
  1259. .block = false,
  1260. };
  1261. DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
  1262. vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
  1263. if (vf->op_rc)
  1264. bnx2x_vf_mbx_resp(bp, vf);
  1265. }
/* Dispatch a VF request to its TLV-specific handler.
 * For a supported TLV the per-VF channel mutex is taken here and released
 * by the response path (bnx2x_vf_mbx_resp).  For an unknown TLV a
 * NOT_SUPPORTED response is sent if the VF is known (ACQUIRED);
 * otherwise the channel is simply unlocked.
 */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;
	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			break;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * just unlock the channel and be done with.
			 */
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}
  1330. /* handle new vf-pf message */
  1331. void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
  1332. {
  1333. struct bnx2x_virtf *vf;
  1334. struct bnx2x_vf_mbx *mbx;
  1335. u8 vf_idx;
  1336. int rc;
  1337. DP(BNX2X_MSG_IOV,
  1338. "vf pf event received: vfid %d, address_hi %x, address lo %x",
  1339. vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
  1340. /* Sanity checks consider removing later */
  1341. /* check if the vf_id is valid */
  1342. if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
  1343. BNX2X_NR_VIRTFN(bp)) {
  1344. BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
  1345. vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
  1346. goto mbx_done;
  1347. }
  1348. vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
  1349. mbx = BP_VF_MBX(bp, vf_idx);
  1350. /* verify an event is not currently being processed -
  1351. * debug failsafe only
  1352. */
  1353. if (mbx->flags & VF_MSG_INPROCESS) {
  1354. BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
  1355. vfpf_event->vf_id);
  1356. goto mbx_done;
  1357. }
  1358. vf = BP_VF(bp, vf_idx);
  1359. /* save the VF message address */
  1360. mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
  1361. mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
  1362. DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
  1363. mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
  1364. /* dmae to get the VF request */
  1365. rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
  1366. mbx->vf_addr_hi, mbx->vf_addr_lo,
  1367. sizeof(union vfpf_tlvs)/4);
  1368. if (rc) {
  1369. BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
  1370. goto mbx_error;
  1371. }
  1372. /* process the VF message header */
  1373. mbx->first_tlv = mbx->msg->req.first_tlv;
  1374. /* dispatch the request (will prepare the response) */
  1375. bnx2x_vf_mbx_request(bp, vf, mbx);
  1376. goto mbx_done;
  1377. mbx_error:
  1378. bnx2x_vf_release(bp, vf, false); /* non blocking */
  1379. mbx_done:
  1380. return;
  1381. }
/* Propagate the PF's local bulletin board for VF index @vf to the VF's
 * memory via DMAE.  Only done once the VF is ENABLED or ACQUIRED (i.e.
 * it has registered a bulletin address); otherwise returns 0.  The board
 * version is bumped and the CRC recomputed before posting so the VF can
 * detect the update.  Returns the DMAE result.
 */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;
	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;
	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}
  1403. }