/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_sriov.h"
#include <linux/crc32.h>
  22. /* place a given tlv on the tlv buffer at a given offset */
  23. void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
  24. u16 length)
  25. {
  26. struct channel_tlv *tl =
  27. (struct channel_tlv *)(tlvs_list + offset);
  28. tl->type = type;
  29. tl->length = length;
  30. }
/* Clear the mailbox and init the header of the first tlv.
 *
 * Zeroes the whole VF->PF mailbox message, writes @type/@length into the
 * first tlv header, and records where the PF should place its response:
 * resp_msg_offset is the size of the request area, i.e. the response is
 * written immediately after the request in the shared mailbox buffer
 * (see how bnx2x_vf_mbx_resp() adds this offset to the VF address).
 */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
  44. /* list the types and lengths of the tlvs on the buffer */
  45. void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
  46. {
  47. int i = 1;
  48. struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
  49. while (tlv->type != CHANNEL_TLV_LIST_END) {
  50. /* output tlv */
  51. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  52. tlv->type, tlv->length);
  53. /* advance to next tlv */
  54. tlvs_list += tlv->length;
  55. /* cast general tlv list pointer to channel tlv header*/
  56. tlv = (struct channel_tlv *)tlvs_list;
  57. i++;
  58. /* break condition for this loop */
  59. if (i > MAX_TLVS_IN_LIST) {
  60. WARN(true, "corrupt tlvs");
  61. return;
  62. }
  63. }
  64. /* output last tlv */
  65. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  66. tlv->type, tlv->length);
  67. }
  68. /* test whether we support a tlv type */
  69. bool bnx2x_tlv_supported(u16 tlvtype)
  70. {
  71. return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
  72. }
  73. static inline int bnx2x_pfvf_status_codes(int rc)
  74. {
  75. switch (rc) {
  76. case 0:
  77. return PFVF_STATUS_SUCCESS;
  78. case -ENOMEM:
  79. return PFVF_STATUS_NO_RESOURCE;
  80. default:
  81. return PFVF_STATUS_FAILURE;
  82. }
  83. }
/* General service functions */

/* Write READY to the VF-PF channel state for @abs_fid in CSTORM internal
 * memory - acks the current message so the FW will accept the next one
 * from this VF (see the "ack the FW" step in bnx2x_vf_mbx_resp()).
 */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}
/* Mark the VF-PF channel for @abs_fid as valid in CSTORM internal memory
 * (writes 1 to the per-function VALID byte).
 */
static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}
/* Mark the mailboxes of all VFs belonging to this PF as valid. */
static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}
/* enable vf_pf mailbox (aka vf-pf-channel)
 *
 * Runs the FLR cleanup epilog for the VF, then marks its channel as both
 * acked (READY) and valid in FW memory, and finally grants the VF access
 * to the mailbox hardware.
 */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}
/* this works only on !E1h
 *
 * Copy @len32 32-bit words between PF memory (@pf_addr) and VF memory
 * (@vf_addr_hi:@vf_addr_lo) via a DMAE PCI-to-PCI transaction issued on
 * behalf of VF @vfid.  @from_vf selects the direction: non-zero means
 * VF memory is the source and the PF buffer the destination; zero means
 * the reverse.  Returns 0 on success or a DMAE error/timeout code from
 * bnx2x_issue_dmae_with_comp() (DMAE_NOT_RDY if DMAE cannot be used).
 */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		/* VF -> PF: source is the VF's function, completion on the
		 * destination (PF) side
		 */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		/* PF -> VF: destination is the VF's function, completion on
		 * the source (PF) side
		 */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
/* Build the response for the request currently in the VF's mailbox and
 * DMAE it back to the VF.
 *
 * The status is derived from vf->op_rc.  ACQUIRE requests get the larger
 * acquire-response TLV; everything else a general response TLV.  The copy
 * is done in two phases: first the response body (everything past the
 * first 8 bytes), then - only after acking the FW - the 8-byte header
 * containing the status, since the VF polls that header to detect
 * completion.  The per-VF channel lock taken in bnx2x_vf_mbx_request()
 * is released here.
 */
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* send response: the VF expects it at resp_msg_offset past the
	 * address of its original request
	 */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* copy the response body, if there is one, before the header, as the vf
	 * is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		length = resp->hdr.tl.length - sizeof(u64);
		/* skip over the first 8 bytes - the header goes last */
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			return;
		}
		/* rewind to the header for the final copy */
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response; clear the in-process flag
	 * first since the VF may issue a new request as soon as the header
	 * lands
	 */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
	}
	return;
}
/* Fill in and send the ACQUIRE response: PF device info plus the resource
 * counts granted to the VF.
 *
 * If @vfop_status maps to NO_RESOURCE the counts reported are the maximum
 * possible (so the VF can retry with a smaller request); on SUCCESS the
 * actual hw queue/sb ids are filled in as well.
 */
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		/* GNU '?:' - report the allotted count if non-zero, else the
		 * maximum obtainable
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}
/* Handle an ACQUIRE request: try to grant the VF the resources it asked
 * for and answer with an acquire response (which carries the result and
 * the granted/maximum resource counts).
 */
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdef info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}
/* Handle an INIT request: record the VF's DMA addresses (slow-path queue,
 * FW statistics and status blocks) and run the VF init flow; the result
 * is reported back via the generic mailbox response.
 */
static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}
  312. /* convert MBX queue-flags to standard SP queue-flags */
  313. static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
  314. unsigned long *sp_q_flags)
  315. {
  316. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
  317. __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
  318. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
  319. __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
  320. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
  321. __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
  322. if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
  323. __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
  324. if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
  325. __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
  326. if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
  327. __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
  328. if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
  329. __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
  330. if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
  331. __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
  332. if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
  333. __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
  334. }
/* Handle a SETUP_Q request: validate the queue id, translate the
 * over-the-channel tx/rx parameters into slow-path queue constructor
 * parameters, and kick off the (non-blocking) queue setup operation.
 * On any validation failure a response carrying vf->op_rc is sent
 * immediately; on success the response is sent by the vfop completion
 * (cmd.done = bnx2x_vf_mbx_resp).
 */
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		/* reinit the VF operation context */
		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		/* the response will be sent by the vfop 'done' callback */
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}
/* dispatch request
 *
 * Routes a received VF message to its handler based on the first tlv's
 * type.  For supported types the per-VF channel mutex is taken here and
 * released in the mailbox response path; unknown types get a
 * NOT_SUPPORTED response when the VF's address is known (state ACQUIRED),
 * otherwise the channel is simply unlocked.
 */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		/* NOTE(review): a type that passes bnx2x_tlv_supported() but
		 * has no case below would take the channel lock and never
		 * send a response - confirm every supported type is handled
		 * here.
		 */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * just unlock the channel and be done with.
			 */
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}
  482. /* handle new vf-pf message */
  483. void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
  484. {
  485. struct bnx2x_virtf *vf;
  486. struct bnx2x_vf_mbx *mbx;
  487. u8 vf_idx;
  488. int rc;
  489. DP(BNX2X_MSG_IOV,
  490. "vf pf event received: vfid %d, address_hi %x, address lo %x",
  491. vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
  492. /* Sanity checks consider removing later */
  493. /* check if the vf_id is valid */
  494. if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
  495. BNX2X_NR_VIRTFN(bp)) {
  496. BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
  497. vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
  498. goto mbx_done;
  499. }
  500. vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
  501. mbx = BP_VF_MBX(bp, vf_idx);
  502. /* verify an event is not currently being processed -
  503. * debug failsafe only
  504. */
  505. if (mbx->flags & VF_MSG_INPROCESS) {
  506. BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
  507. vfpf_event->vf_id);
  508. goto mbx_done;
  509. }
  510. vf = BP_VF(bp, vf_idx);
  511. /* save the VF message address */
  512. mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
  513. mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
  514. DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
  515. mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
  516. /* dmae to get the VF request */
  517. rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
  518. mbx->vf_addr_hi, mbx->vf_addr_lo,
  519. sizeof(union vfpf_tlvs)/4);
  520. if (rc) {
  521. BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
  522. goto mbx_error;
  523. }
  524. /* process the VF message header */
  525. mbx->first_tlv = mbx->msg->req.first_tlv;
  526. /* dispatch the request (will prepare the response) */
  527. bnx2x_vf_mbx_request(bp, vf, mbx);
  528. goto mbx_done;
  529. mbx_error:
  530. mbx_done:
  531. return;
  532. }