/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_sriov.h"

/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
		   u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

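/* convert an errno-style return code into a PF-VF channel status code */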
static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

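/* Copy a buffer between PF and VF memory over PCI using the DMAE block.
 * 'from_vf' selects the direction: when set, the VF buffer (vf_addr_hi/lo,
 * qualified by vfid) is the source and pf_addr the destination; otherwise
 * the PF buffer is copied out to the VF. len32 is the length in dwords.
 */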
/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}

	dmae.len = len32;
	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}

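/* Send a response to the VF over the VF-PF channel. The response body (if
 * any) is DMAE-copied to the VF buffer first; the leading 8 bytes holding
 * the header and status are written last, after the FW has been acked,
 * since the VF polls on the header to detect completion.
 */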
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);

	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* copy the response body, if there is one, before the header, as the vf
	 * is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		length = resp->hdr.tl.length - sizeof(u64);
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			return;
		}
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
	}

	return;
}

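/* Build and send the ACQUIRE response: PF/device info plus the resources
 * granted to the VF. On NO_RESOURCE the maximum available numbers are
 * reported instead, so the VF can scale down its request and retry.
 */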
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}

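/* Handle an ACQUIRE request: reserve the requested resources for the VF and
 * reply with what was actually granted.
 */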
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdef info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to us
			 * just unlock the channel and be done with it.
			 */
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}

/* handle new vf-pf message */
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
	struct bnx2x_virtf *vf;
	struct bnx2x_vf_mbx *mbx;
	u8 vf_idx;
	int rc;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		goto mbx_done;
	}
	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
	mbx = BP_VF_MBX(bp, vf_idx);

	/* verify an event is not currently being processed -
	 * debug failsafe only
	 */
	if (mbx->flags & VF_MSG_INPROCESS) {
		BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
			  vfpf_event->vf_id);
		goto mbx_done;
	}
	vf = BP_VF(bp, vf_idx);

	/* save the VF message address */
	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* dmae to get the VF request */
	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
				  mbx->vf_addr_hi, mbx->vf_addr_lo,
				  sizeof(union vfpf_tlvs)/4);
	if (rc) {
		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
		goto mbx_error;
	}

	/* process the VF message header */
	mbx->first_tlv = mbx->msg->req.first_tlv;

	/* dispatch the request (will prepare the response) */
	bnx2x_vf_mbx_request(bp, vf, mbx);
	goto mbx_done;

mbx_error:
mbx_done:
	return;
}