bnx2x_vfpf.c

/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_sriov.h"
#include <linux/crc32.h>

/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
		   u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
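
/* Illustrative example (not part of the driver logic): a VF-side caller
 * would typically prepare a request like
 *
 *	bnx2x_vfpf_prep(bp, &bp->vf2pf_mbox->req.init.first_tlv,
 *			CHANNEL_TLV_INIT, sizeof(struct vfpf_init_tlv));
 *
 * which leaves the mailbox zeroed, the first tlv typed and sized, and
 * resp_msg_offset pointing past the request area, so the PF knows where
 * to DMA its response.
 */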

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on non-E1x chips; E1x has no VF support */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
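
/* Note: len32 counts 32-bit words, not bytes. Illustrative example: to pull
 * an entire VF request (union vfpf_tlvs) into the PF-side buffer, the
 * callers below use
 *
 *	bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
 *			     mbx->vf_addr_hi, mbx->vf_addr_lo,
 *			     sizeof(union vfpf_tlvs)/4);
 */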

static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* copy the response body, if there is one, before the header, as the
	 * VF is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		length = resp->hdr.tl.length - sizeof(u64);
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			return;
		}
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc)
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
}
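
/* Ordering note (illustrative): the VF polls the first 8 bytes of the
 * response (the header carrying the status/done indication), so the
 * sequence above is body first, then FW ack, then the 8-byte header as the
 * very last DMAE write. The VF can therefore never observe a completed
 * status while the response body is still in flight.
 */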

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resource counts; if status is NO_RESOURCE these are
		 * the maximum possible counts
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdev info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}
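
/* Handshake sketch (illustrative): ACQUIRE is the first message a VF sends.
 * The VF fills a vfpf_acquire_tlv with the resources it wants; the PF either
 * grants them (PFVF_STATUS_SUCCESS, with hw_qid[]/hw_sbs[] populated in the
 * response) or answers PFVF_STATUS_NO_RESOURCE along with the maximum counts
 * it could offer, letting the VF retry with a smaller request.
 */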

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
}
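
/* Illustrative example: a VF that requested TPA and statistics on a queue
 * would pass mbx_q_flags = VFPF_QUEUE_FLG_TPA | VFPF_QUEUE_FLG_STATS, and
 * this helper would set BNX2X_Q_FLG_TPA and BNX2X_Q_FLG_STATS in the
 * sp_q_flags bitmap consumed by the queue state machine.
 */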

static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be set up alongside rx queues, so if neither the
	 * rx nor the tx queue is marked as valid there's nothing to do
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		/* reinit the VF operation context */
		memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}

enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
	BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
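
/* State-machine note (illustrative): bnx2x_vfop_mbx_qfilters() below walks
 * these states in order, MACS -> VLANS -> RXMODE -> MCAST -> DONE. Each
 * asynchronous sub-command registers bnx2x_vfop_mbx_qfilters itself as its
 * completion callback, so the function re-enters in the next state until
 * the op either finishes or fails.
 */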

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vfop_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vfop_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
		sizeof(struct bnx2x_vfop_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	INIT_LIST_HEAD(&fl->head);

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		list_add_tail(&fl->filters[j++].link, &fl->head);
	}
	if (list_empty(&fl->head))
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}
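
/* Allocation note (illustrative): as the size computation above implies,
 * struct bnx2x_vfop_filters ends in an array of bnx2x_vfop_filter entries,
 * so fsz is the header plus one entry per message filter. That is an upper
 * bound: entries not matching type_flag are skipped, and *pfl is left
 * untouched when no filter of the requested type was present.
 */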

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;

	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

		/* check for any mac/vlan changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

		/* check for any mac/vlan changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;

			/* convert the VF-PF rx mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

			/* a packet arriving at the VF's mac should be
			 * accepted with any vlan
			 */
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;

	default:
		bnx2x_vfop_default(state);
	}
}

static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
				 bnx2x_vfop_mbx_qfilters, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
					     cmd->block);
	}
	return -ENOMEM;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		goto response;
	return;

response:
	bnx2x_vf_mbx_resp(bp, vf);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			break;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * just unlock the channel and be done with it
			 */
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}
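
/* Extension note (illustrative): supporting a new request type would mean
 * adding a CHANNEL_TLV_* value below CHANNEL_TLV_MAX (so that
 * bnx2x_tlv_supported() admits it) and a matching case in the switch above.
 * Anything outside that range lands in the unknown-TLV path and is answered
 * with PFVF_STATUS_NOT_SUPPORTED when the VF is reachable.
 */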

/* handle new vf-pf message */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
	struct bnx2x_virtf *vf;
	struct bnx2x_vf_mbx *mbx;
	u8 vf_idx;
	int rc;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);

	/* sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		goto mbx_done;
	}
	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
	mbx = BP_VF_MBX(bp, vf_idx);

	/* verify an event is not currently being processed -
	 * debug failsafe only
	 */
	if (mbx->flags & VF_MSG_INPROCESS) {
		BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
			  vfpf_event->vf_id);
		goto mbx_done;
	}
	vf = BP_VF(bp, vf_idx);

	/* save the VF message address */
	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* dmae to get the VF request */
	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
				  mbx->vf_addr_hi, mbx->vf_addr_lo,
				  sizeof(union vfpf_tlvs)/4);
	if (rc) {
		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
		goto mbx_error;
	}

	/* process the VF message header */
	mbx->first_tlv = mbx->msg->req.first_tlv;

	/* dispatch the request (will prepare the response) */
	bnx2x_vf_mbx_request(bp, vf, mbx);
	goto mbx_done;

mbx_error:
mbx_done:
	return;
}
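
/* End-to-end flow sketch (illustrative): a VF writes its TLV request into
 * its own DMA-able mailbox and triggers a vf-pf event. bnx2x_vf_mbx() then
 * (1) validates the vf_id, (2) DMAEs the request into the PF copy of the
 * mailbox, and (3) dispatches it via bnx2x_vf_mbx_request(); the handler
 * eventually calls bnx2x_vf_mbx_resp(), which DMAEs the response back to
 * the address the VF advertised, header last.
 */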