/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

/* place a given tlv on the tlv buffer at a given offset */
void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
		   u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

/* Clear the mailbox and init the header of the first tlv */
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
		     u16 type, u16 length)
{
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
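
/* Note: bnx2x_vfpf_prep() takes bp->vf2pf_mutex and bnx2x_vfpf_finalize()
 * releases it, so every request flow that calls prep must reach finalize
 * on all paths (including error paths) or the channel deadlocks.
 */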
/* releases the mailbox */
void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}

/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}
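
/* The request side of the vf-pf channel: the VF writes the DMA address of
 * its request into its cstorm zone, triggers the FW, then polls the 'done'
 * byte, which the PF writes back (via DMAE) once the response has been
 * placed in the VF's mailbox.
 */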
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return 0;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}

static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);
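
	/* Resource negotiation: keep re-sending ACQUIRE, shrinking each
	 * requested resource to the PF's advertised maximum whenever the PF
	 * answers NO_RESOURCE, until the PF accepts or we give up after
	 * VF_ACQUIRE_THRESH attempts.
	 */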
	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;
	u32 vf_id;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		/* PF timeout */
		goto out;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - the request only supports a single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no point in
	 * continuing with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &bp->macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &bp->vlans_pool);

	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);

	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	vf->leading_rss = cl_id;
	q->is_leading = true;
}
/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
				(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}
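
	/* The PF can reject our MAC because it has posted a new, mandatory
	 * MAC on the bulletin board (e.g. via the set-vf-mac ndo). In that
	 * case adopt the bulletin MAC and retry until the PF accepts or the
	 * bulletin stops changing.
	 */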
	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* copy mac from bulletin to device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("failed to configure rss over the VF-PF channel: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	netdev_for_each_mc_addr(ha, dev) {
		/* We support at most PFVF_MAX_MULTICAST_PER_VF mcast
		 * addresses; bail out before overflowing the filter array
		 */
		if (i >= PFVF_MAX_MULTICAST_PER_VF) {
			DP(NETIF_MSG_IFUP,
			   "VF supports not more than %d multicast MAC addresses\n",
			   PFVF_MAX_MULTICAST_PER_VF);
			rc = -EINVAL;
			goto out;
		}
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
		break;
	case BNX2X_RX_MODE_NORMAL:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_PROMISC:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		rc = -EINVAL;
		goto out;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}
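
/* PF <-> VF mailbox copies go through the DMAE engine rather than a plain
 * memcpy: the opcode carries the VF id, so the transfer is issued in the
 * VF's PCI function context and the VF-side address is translated the same
 * way the VF's own DMA would be.
 */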
/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae);
}
static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u64 vf_addr;
	dma_addr_t pf_addr;
	u16 length, type;
	int rc;
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, resp, 0, type, length);
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* copy the response body, if there is one, before the header, as the vf
	 * is sensitive to the header being written
	 */
	if (resp->hdr.tl.length > sizeof(u64)) {
		length = resp->hdr.tl.length - sizeof(u64);
		vf_addr += sizeof(u64);
		pf_addr += sizeof(u64);
		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
					  U64_HI(vf_addr),
					  U64_LO(vf_addr),
					  length/4);
		if (rc) {
			BNX2X_ERR("Failed to copy response body to VF %d\n",
				  vf->abs_vfid);
			goto mbx_error;
		}
		vf_addr -= sizeof(u64);
		pf_addr -= sizeof(u64);
	}

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	bnx2x_vf_release(bp, vf, false); /* non blocking */
}
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp(bp, vf);
}
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdev info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}
/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&vf->op_params.qctor, 0, sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}
enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
	BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
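
/* The q_filters flow below is an asynchronous vfop state machine: each
 * mac-list/vlan-list/rxmode/mcast command is issued with cmd.done pointing
 * back at bnx2x_vfop_mbx_qfilters, so when a command completes the function
 * is re-entered and resumes from the next vfop->state.
 */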
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vfop_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vfop_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
		sizeof(struct bnx2x_vfop_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	INIT_LIST_HEAD(&fl->head);

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		list_add_tail(&fl->filters[j++].link, &fl->head);
	}
	if (list_empty(&fl->head))
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}
  1169. #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
  1170. #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;

	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;

			/* convert the VF-PF rx_mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

			/* A packet arriving at the vf's mac should be accepted
			 * with any vlan
			 */
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			return;
		}
		/* fall through */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;

	default:
		bnx2x_vfop_default(state);
	}
}
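
/* Launch the q_filters state machine from its first state. If no vfop slot
 * can be allocated this returns -ENOMEM, and the caller is expected to send
 * the error response to the VF itself.
 */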
static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
				 bnx2x_vfop_mbx_qfilters, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
					     cmd->block);
	}
	return -ENOMEM;
}
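
/* Handler for the SET_Q_FILTERS request: enforce any mac configured via the
 * set_vf_mac ndo, validate vf_qid, then launch the non-blocking q_filters
 * state machine with bnx2x_vf_mbx_resp() as its completion callback.
 */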
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * Because the PF may have been unable to configure the mac at the
	 * time, since the queue was not yet set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by the ndo, we only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		goto response;
	return;

response:
	bnx2x_vf_mbx_resp(bp, vf);
}
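
/* Handler for the TEARDOWN_Q request - tears down a single VF queue. The
 * response is sent from the .done callback on success, or directly here if
 * the operation could not even be launched.
 */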
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}
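
/* Handler for the CLOSE request - closes all of the VF's queues. */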
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}
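
/* Handler for the RELEASE request - frees all resources held by the VF. */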
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}
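
/* Handler for the UPDATE_RSS request. The TLV flags are translated bit by
 * bit rather than copied wholesale, so either side may grow flags the other
 * does not yet know about. Note the final sanity check: requesting UDP RSS
 * without the matching TCP flag would reportedly trip a FW assert, so such
 * configurations are rejected up front.
 */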
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};
	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		vf->op_rc = -EINVAL;
		goto mbx_resp;
	}

	/* set vfop params according to rss tlv */
	memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
	       sizeof(rss_tlv->rss_key));
	vf_op_params->rss_obj = &vf->rss_conf_obj;
	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);

	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		vf->op_rc = -EINVAL;
		goto mbx_resp;
	}

	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);

mbx_resp:
	if (vf->op_rc)
		bnx2x_vf_mbx_resp(bp, vf);
}

/* dispatch request */
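/* All handlers share the same conventions: vf->op_rc carries the result and
 * bnx2x_vf_mbx_resp() sends the reply, either from a .done callback once the
 * operation completes or directly on an early failure. The per-VF channel
 * lock taken here is released when that response goes out.
 */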
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			break;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			break;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			break;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			break;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			break;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the
		 * future - a version written after this PF driver was
		 * written, which supports features unknown to us as of yet.
		 * Too bad, since we don't support them. Or this may be
		 * because someone wrote a crappy VF driver and is sending
		 * garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
			/* mbx_resp uses the op_rc of the VF */
			vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

			/* notify the VF that we do not support this request */
			bnx2x_vf_mbx_resp(bp, vf);
		} else {
			/* can't send a response since this VF is unknown to
			 * us - just ack the FW to release the mailbox and
			 * unlock the channel.
			 */
			storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
			/* make sure the ack is written before unlocking */
			mmiowb();
			bnx2x_unlock_vf_pf_channel(bp, vf,
						   mbx->first_tlv.tl.type);
		}
	}
}

/* handle new vf-pf message */
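/* This is the entry point from the PF's slow-path event queue: validate the
 * vf_id, DMA the request out of VF memory into the PF-side mailbox copy and
 * dispatch it. If the copy fails no response can be sent, so the VF is
 * released (non-blocking) instead.
 */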
void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
{
	struct bnx2x_virtf *vf;
	struct bnx2x_vf_mbx *mbx;
	u8 vf_idx;
	int rc;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);

	/* Sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		goto mbx_done;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
	mbx = BP_VF_MBX(bp, vf_idx);

	/* verify an event is not currently being processed -
	 * debug failsafe only
	 */
	if (mbx->flags & VF_MSG_INPROCESS) {
		BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
			  vfpf_event->vf_id);
		goto mbx_done;
	}

	vf = BP_VF(bp, vf_idx);

	/* save the VF message address */
	mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
	mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* dmae to get the VF request */
	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
				  mbx->vf_addr_hi, mbx->vf_addr_lo,
				  sizeof(union vfpf_tlvs)/4);
	if (rc) {
		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
		goto mbx_error;
	}

	/* process the VF message header */
	mbx->first_tlv = mbx->msg->req.first_tlv;

	/* dispatch the request (will prepare the response) */
	bnx2x_vf_mbx_request(bp, vf, mbx);
	goto mbx_done;

mbx_error:
	bnx2x_vf_release(bp, vf, false); /* non blocking */
mbx_done:
	return;
}

/* propagate local bulletin board to vf */
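/* The bulletin is versioned and crc-protected so the VF can both detect
 * updates by polling and discard a partially written copy. Posting is a
 * no-op until the VF has at least been acquired.
 */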
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

	/* propagate bulletin board via dmae to vf memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}