/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
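
/* In outline, each VFOP handler below is a small state machine: it is
 * (re)entered with the state saved in vfop->state, each case programs the
 * next state and issues a ramrod, and bnx2x_vfop_finalize() either returns
 * while the ramrod completion is still pending, falls through to the next
 * case, or jumps to the op_err/op_done epilogue labels.
 */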

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_del(&pos->link);
			list_add(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. if this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
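
/* The helper struct below collects the per-call options for VLAN/MAC vfops;
 * bnx2x_vfop_vlan_mac_prep_ramrod() translates them into the ramrod_flags
 * and vlan_mac_flags consumed by bnx2x_config_vlan_mac().
 */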
struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. this routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX)
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return err;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return err;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];

		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;
	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}

	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);

	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
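/* Resolve the queue state object addressed by a VF CID, so the generic
 * slow-path completion code can operate on VF queues the same way it does
 * on PF queues. The queue index is recovered from the low BNX2X_VF_CID_WND
 * bits of the CID, mirroring the encoding used in bnx2x_iov_eq_sp_event().
 */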
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND) - 1);

		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
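/* Mark a slow-path operation as pending for the VF that owns vf_cid and,
 * when requested, kick the PF slow-path task so bnx2x_iov_sp_task() picks
 * it up. The EQ handler above calls this with queue_work == false,
 * presumably because that path already executes from slow-path context.
 */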
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}
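/* Append one statistics query entry per active VF queue to the PF's
 * firmware statistics request, right after the PF (and optional FCoE)
 * entries, and bump the command count accordingly. Only VFs in VF_ENABLED
 * state and queues that are not logically stopped are included.
 */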
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_cl_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(vf->fw_stat_map));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(vf->fw_stat_map));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
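/* Run from the PF slow-path task: advance the state machine of every VF
 * that has queued operations and whose op_in_progress flag is set (see
 * bnx2x_iov_sp_event() above).
 */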
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
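/* Map an IGU status-block id back to the VF whose igu_base_id window
 * contains it. Note that when no window matches, the last VF iterated is
 * returned rather than NULL, presumably because callers only pass ids
 * already known to belong to a VF.
 */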
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
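/* Program one entry of the PXP host-zone permission table: when enabling,
 * the entry records the absolute VF id together with what appears to be a
 * 'valid' bit (1 << 6); when disabling, the entry is simply cleared.
 */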
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
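/* Check a VF-PF resource request against what this VF can be granted:
 * either the counts already recorded for the VF or, while the queue counts
 * are still zero, the per-VF maximum reported by bnx2x_vf_max_queue_cnt().
 */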
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
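/* Record the VF's resource allocation and set up its queue objects.
 * A VF already in VF_ACQUIRED state may only re-request counts no larger
 * than those previously granted; otherwise the VF must be FREE or RESET,
 * the request is validated against the static per-VF maxima, and each queue
 * is assigned its CID and CDU context before the VF moves to VF_ACQUIRED.
 */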
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid / ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS - 1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
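/* Finish bringing up an acquired VF: initialize its status blocks, program
 * the firmware function parameters (stats/SPQ/TPA/RSS flags as configured),
 * enable the VF's access and traffic, open its queue-zone permission table
 * entries and move it to VF_ENABLED.
 */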
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	return 0;
}
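/* VF-PF channel locking: a per-VF mutex serializes mailbox operations, and
 * op_current records which TLV currently holds the lock so that the unlock
 * path below can warn on mismatched lock/unlock pairs.
 */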
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}