/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e.h"

/***********************misc routines*****************************/
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the vf info
 * @vsi_id: vf relative vsi id
 *
 * check for the valid vsi id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
{
	struct i40e_pf *pf = vf->pf;

	return pf->vsi[vsi_id]->vf_id == vf->vf_id;
}
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the vf info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;

	return qid < pf->vsi[vsi_id]->num_queue_pairs;
}
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the vf info
 * @vector_id: vf relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue id
 *
 * return pf relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vsi_idx];
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
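
	/* The VSI's queue map comes in two layouts: noncontiguous, where
	 * queue_mapping[] is a full per-queue lookup table, and contiguous,
	 * where queue_mapping[0] is the base and the VSI queue id is just
	 * an offset from it.
	 */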
	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
/**
 * i40e_ctrl_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
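
	/* The *_REQ bits request a state change; the *_STAT bit reflects
	 * the queue's actual state, which is what the *CHECK cases poll.
	 */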
	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
/**
 * i40e_ctrl_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @ctrl: control flags
 *
 * enable/disable/enable check/disable check
 **/
static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				  u16 vsi_queue_id,
				  enum i40e_queue_ctrl ctrl)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool writeback = false;
	u16 pf_queue_id;
	int ret = 0;
	u32 reg;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
	switch (ctrl) {
	case I40E_QUEUE_CTRL_ENABLE:
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_ENABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
		break;
	case I40E_QUEUE_CTRL_DISABLE:
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_DISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLE:
		reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
		writeback = true;
		break;
	case I40E_QUEUE_CTRL_FASTDISABLECHECK:
		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
		if (!ret) {
			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
			writeback = true;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (writeback) {
		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
		i40e_flush(hw);
	}

	return ret;
}
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
				      struct i40e_virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
			  ((pf->hw.func_caps.num_msix_vectors_vf - 1)
			   * vf->vf_id) + (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
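
	/* Fold the rx and tx maps into one bitmap: each VSI queue owns
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES consecutive bits, rx at the even
	 * offset and tx at the odd one, so walking the bitmap below yields
	 * the queues in the order the link list should chain them.
	 */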
	tempmap = vecmap->rxq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1ul <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				 vsi_queue_id));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}

	tempmap = vecmap->txq_map;
	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (vsi_queue_id < I40E_MAX_VSI_QP) {
		linklistmap |= (1ul <<
				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
				 + 1));
		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					     vsi_queue_id + 1);
	}
	next_q = find_first_bit(&linklistmap,
				(I40E_MAX_VSI_QP *
				 I40E_VIRTCHNL_SUPPORTED_QTYPES));
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap,
				       (I40E_MAX_VSI_QP *
					I40E_VIRTCHNL_SUPPORTED_QTYPES),
				       next_q + 1);
		if (next_q < (I40E_MAX_VSI_QP *
			      I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

irq_list_done:
	i40e_flush(hw);
}
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
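	/* the HMC context takes the ring base address in 128-byte units */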
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the vf info
 * @vsi_idx: index of VSI in PF struct
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
				    u16 vsi_queue_id,
				    struct i40e_virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set splitalways mode 10b */
		rx_ctx.dtype = 0x2;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
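
	/* buffer sizes are stored in the rx context in coarse granules,
	 * hence the HBUFF/DBUFF shifts above and below
	 */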
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the vf info
 * @type: type of VSI to allocate
 *
 * alloc vf vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for vf %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		vf->lan_vsi_index = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		dev_info(&pf->pdev->dev,
			 "LAN VSI index %d, VSI id %d\n",
			 vsi->idx, vsi->id);
		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
				    0, true, false);
	}
	if (!f) {
		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
		ret = -ENOMEM;
		goto error_alloc_vsi_res;
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		goto error_alloc_vsi_res;
	}

	/* accept bcast pkts. by default */
	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
		ret = -EINVAL;
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_reset_vf
 * @vf: pointer to the vf structure
 * @flr: VFLR was issued or not
 *
 * reset the vf
 **/
int i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, msix_vf;
	bool rsd = false;
	u16 pf_queue_id;
	int i, j;

	/* warn the VF */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
	/* The PF triggers the VFR only when the VF requests it; in case of
	 * VFLR, the HW triggers the VFR itself
	 */
	if (!flr) {
		/* reset vf using VPGEN_VFRTRIG reg */
		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 4; i++) {
		/* vf reset requires driver to first reset the vf and then
		 * poll the status register to make sure that the requested
		 * op was completed successfully
		 */
		udelay(10);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
			vf->vf_id);
	/* fast disable qps */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLE);
	}
	/* Queue enable/disable requires driver to first reset the vf and
	 * then poll the status register to make sure that the requested
	 * op was completed successfully
	 */
	udelay(10);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
					     I40E_QUEUE_CTRL_FASTDISABLECHECK);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
				 j, vf->lan_vsi_index, vf->vf_id);
	}
	/* clear the irq settings */
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
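
	/* vector 0 has a dedicated VPINT_LNKLST0 register per VF; the
	 * remaining vectors index into the shared VPINT_LNKLSTN array,
	 * (msix_vf - 1) entries per VF
	 */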
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* set the defaults for the rqctl & tqctl registers */
	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
	}

	/* clear the reset bit in the VPGEN_VFRTRIG reg */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	/* tell the VF the reset is done */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
	i40e_flush(hw);

	return ret;
}
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the vf info
 *
 * enable vf mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
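	/* each VSILAN_QTABLE register carries two queue indexes, one in the
	 * low and one in the high 16 bits; 0x7FF in a slot marks it unused
	 */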
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf,
							  vf->lan_vsi_index,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
	}

	i40e_flush(hw);
}
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the vf info
 *
 * disable vf mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the vf info
 *
 * free vf resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_index) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
		vf->lan_vsi_index = 0;
		vf->lan_vsi_id = 0;
	}

	/* reset some of the state variables keeping
	 * track of the resources
	 */
	vf->num_queue_pairs = 0;
	vf->vf_states = 0;
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the vf info
 *
 * allocate vf resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * vf req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* vf is now completely initialized */
	set_bit(I40E_VF_STAT_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
/**
 * i40e_vfs_are_assigned
 * @pf: pointer to the pf structure
 *
 * Determine if any VFs are assigned to VMs
 **/
static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
{
	struct pci_dev *pdev = pf->pdev;
	struct pci_dev *vfdev;

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				       I40E_VF_DEVICE_ID,
				       vfdev);
	}

	return false;
}
/**
 * i40e_free_vfs
 * @pf: pointer to the pf structure
 *
 * free vf resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int i;

	if (!pf->vf)
		return;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, I40E_PFINT_DYN_CTL0, 0);
	i40e_flush(hw);

	/* free up vf resources */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;
	pf->num_alloc_vfs = 0;

	if (!i40e_vfs_are_assigned(pf))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev,
			 "unable to disable SR-IOV because VFs are assigned.\n");

	/* Re-enable interrupt 0. */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_PFINT_DYN_CTL0_INTENA_MASK |
	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
	i40e_flush(hw);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the pf structure
 * @num_alloc_vfs: number of vfs to allocate
 *
 * allocate vf resources
 **/
static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"pci_enable_sriov failed with error %d!\n", ret);
		pf->num_alloc_vfs = 0;
		goto err_iov;
	}

	/* allocate memory */
	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);

		ret = i40e_alloc_vf_res(&vfs[i]);
		i40e_reset_vf(&vfs[i], true);
		if (ret)
			break;

		/* enable vf vplan_qtable mappings */
		i40e_enable_vf_mappings(&vfs[i]);
	}
	pf->vf = vfs;
	pf->num_alloc_vfs = num_alloc_vfs;

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	return ret;
}
#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		err = -EPERM;
		goto err_out;
	}

	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of vfs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs)
		return i40e_pci_sriov_enable(pdev, num_vfs);

	i40e_free_vfs(pf);
	return 0;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the vf info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to vf
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
			v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_err(&pf->pdev->dev,
			"Unable to send the message to VF %d aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the vf info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to vf
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum i40e_virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_version_info info = {
		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
	};

	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct i40e_virtchnl_version_info));
}
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int i = 0, len = 0;
	int num_vsis = 1;
	int ret;

	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct i40e_virtchnl_vf_resource) +
	       sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}

	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!vsi->info.pvid)
		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	if (vf->lan_vsi_index) {
		vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
		vfres->vsi_res[i].num_queue_pairs =
			pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
		memcpy(vfres->vsi_res[i].default_mac_addr,
		       vf->default_lan_addr.addr, ETH_ALEN);
		i++;
	}
	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the vf */
	ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the vf info
 *
 * called from the vf to reset itself,
 * unlike other virtchnl messages, pf driver
 * doesn't send the response back to the vf
 **/
static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
		return -ENOENT;

	return i40e_reset_vf(vf, false);
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the promiscuous mode of
 * vf vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_promisc_info *info =
	    (struct i40e_virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool allmulti = false;
	bool promisc = false;
	i40e_status aq_ret;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
		promisc = true;
	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
						     promisc, NULL);
	if (aq_ret)
		goto error_param;

	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
						       allmulti, NULL);

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf,
				       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vsi_queue_config_info *qci =
	    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
	struct i40e_virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
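		/* the tx and rx halves of a pair must name the same VSI
		 * and the same queue index
		 */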
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_irq_map_info *irqmap_info =
	    (struct i40e_virtchnl_irq_map_info *)msg;
	struct i40e_virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		tempmap = map->txq_map;
		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
		while (vsi_queue_id < I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
						     vsi_queue_id + 1);
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_ENABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_ENABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	u16 queue_id;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
				       I40E_QUEUE_CTRL_DISABLE);

		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	/* Poll the status register to make sure that the
	 * requested op was completed successfully
	 */
	udelay(10);

	tempmap = vqs->rx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on RX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

	tempmap = vqs->tx_queues;
	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
	while (queue_id < I40E_MAX_VSI_QP) {
		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
					   I40E_QUEUE_CTRL_DISABLECHECK)) {
			dev_err(&pf->pdev->dev,
				"Queue control check failed on TX queue %d of VSI %d VF %d\n",
				queue_id, vsi_id, vf->vf_id);
		}
		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
					 queue_id + 1);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_queue_select *vqs =
	    (struct i40e_virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vqs->vsi_id];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));

error_param:
	/* send the response back to the vf */
	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				al->list[i].addr);
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr, true, false);
		if (!f) {
			if (i40e_is_vsi_in_vlan(vsi))
				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
							 true, false);
			else
				f = i40e_add_filter(vsi, al->list[i].addr, -1,
						    true, false);
		}

		if (!f) {
			dev_err(&pf->pdev->dev,
				"Unable to add VF MAC filter\n");
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
				       aq_ret);
}
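
/*
 * Sketch (assumed guest-side usage, not part of this driver): a VF
 * driver reaches this handler by sending I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
 * with a buffer laid out roughly as:
 *
 *	struct i40e_virtchnl_ether_addr_list *al = buf;
 *	al->vsi_id       = <the vf's lan vsi>;
 *	al->num_elements = 1;
 *	memcpy(al->list[0].addr, mac, ETH_ALEN);
 *
 * i40e_vc_validate_vf_msg() further down requires msglen to match this
 * layout exactly before the handler runs.
 */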

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_ether_addr_list *al =
	    (struct i40e_virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	vsi = pf->vsi[vsi_id];

	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		i40e_del_filter(vsi, al->list[i].addr,
				I40E_VLAN_ANY, true, false);

	/* program the updated filter list */
	if (i40e_sync_vsi_filters(vsi))
		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
				       aq_ret);
}
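
/*
 * Note: unlike the add path, deletion does not validate the addresses;
 * an entry with no matching filter is expected to fall through harmlessly.
 * As with adds, the hardware update is deferred to i40e_sync_vsi_filters().
 */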

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
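
/*
 * Note: a host-configured port VLAN (vsi->info.pvid) takes precedence
 * here; VF requests to add their own VLAN filters are rejected with
 * I40E_ERR_PARAM while a pvid is set, since i40e_ndo_set_vf_port_vlan()
 * owns the VLAN configuration in that case.
 */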

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_virtchnl_vlan_filter_list *vfl =
	    (struct i40e_virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vsi_id];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to delete VF vlan filter %d, error %d\n",
				vfl->vlan_id[i], ret);
	}

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_fcoe_msg
 * @vf: pointer to the vf info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the vf for the fcoe msgs
 **/
static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	aq_ret = I40E_ERR_NOT_IMPLEMENTED;

error_param:
	/* send the response to the vf */
	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
}

/**
 * i40e_vc_validate_vf_msg
 * @vf: pointer to the vf info
 * @v_opcode: virtchnl operation code
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg
 **/
static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
				   u32 v_retval, u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return -EPERM;
	}

	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format)) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		return -EINVAL;
	}

	return 0;
}
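
/*
 * Worked example for the variable-length checks above: an ADD_VLAN
 * message carrying two VLAN ids must satisfy
 *
 *	msglen == sizeof(struct i40e_virtchnl_vlan_filter_list)
 *		  + 2 * sizeof(u16)
 *
 * A shorter or longer buffer fails the (valid_len != msglen) test, and
 * num_elements == 0 is rejected via err_msg_format even when the length
 * happens to match.
 */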

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the pf structure
 * @vf_id: source vf id
 * @v_opcode: virtchnl operation code
 * @v_retval: return value carried in the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from vf
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_vf *vf = &(pf->vf[vf_id]);
	struct i40e_hw *hw = &pf->hw;
	int ret;

	pf->vf_aq_requests++;
	/* perform basic checks on the msg */
	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
	if (ret) {
		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
		return ret;
	}
	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		ret = i40e_vc_reset_vf_msg(vf);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_FCOE:
		ret = i40e_vc_fcoe_msg(vf, msg, msglen);
		break;
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev,
			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}
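
/*
 * Note: every opcode handler above sends its own reply over the admin
 * queue (via i40e_vc_send_resp_to_vf() or i40e_vc_send_msg_to_vf()), so
 * the return value here only reports handler status to the caller; it
 * is not itself delivered to the VF.
 */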

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the pf structure
 *
 * called from the vflr irq handler to
 * free up vf resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	u32 reg, reg_idx, bit_idx, vf_id;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		return 0;

	clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr vfs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & (1 << bit_idx)) {
			/* clear the bit in GLGEN_VFLRSTAT */
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));

			if (i40e_reset_vf(vf, true))
				dev_err(&pf->pdev->dev,
					"Unable to reset the VF %d\n", vf_id);
			/* free up vf resources to destroy vsi state */
			i40e_free_vf_res(vf);

			/* allocate new vf resources with the default state */
			if (i40e_alloc_vf_res(vf))
				dev_err(&pf->pdev->dev,
					"Unable to allocate VF resources %d\n",
					vf_id);

			i40e_enable_vf_mappings(vf);
		}
	}

	/* re-enable vflr interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	return 0;
}
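
/*
 * Worked example for the GLGEN_VFLRSTAT indexing above: with
 * vf_base_id == 32 and vf_id == 5, the absolute VF number is 37, so
 * reg_idx = 37 / 32 = 1 and bit_idx = 37 % 32 = 5, i.e. bit 5 of
 * GLGEN_VFLRSTAT(1) flags that VF's function level reset.
 */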

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the pf structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum i40e_virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
		vf++;
	}
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the pf structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
	pfe.event_data.link_event.link_status =
	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
	pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;

	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the pf structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the vf structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct i40e_virtchnl_pf_event pfe;

	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
			       I40E_SUCCESS, (u8 *)&pfe,
			       sizeof(struct i40e_virtchnl_pf_event), NULL);
}
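
/*
 * Note: the reset notifications above use severity CERTAIN_DOOM while
 * link changes use INFO; both travel as I40E_VIRTCHNL_OP_EVENT messages,
 * broadcast to every VF or unicast to a single one through
 * i40e_aq_send_msg_to_vf().
 */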

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @mac: mac address
 *
 * program vf mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev,
			"Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	if (!is_valid_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid VF ethernet address\n");
		ret = -EINVAL;
		goto error_param;
	}

	/* delete the temporary mac address */
	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);

	/* add the new mac address */
	f = i40e_add_filter(vsi, mac, 0, true, false);
	if (!f) {
		dev_err(&pf->pdev->dev,
			"Unable to add VF ucast filter\n");
		ret = -ENOMEM;
		goto error_param;
	}

	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
	ret = 0;

error_param:
	return ret;
}
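
/*
 * Assumed usage sketch (standard iproute2, not i40e-specific): this ndo
 * hook is typically reached through something like
 *
 *	ip link set dev <pf-netdev> vf 0 mac 02:00:00:00:00:01
 *
 * after which, as the dev_info() above says, the VF driver must be
 * reloaded to pick up the new address.
 */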

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @vlan_id: vlan id to be set on the vf
 * @qos: priority setting
 *
 * program vf vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
			      int vf_id, u16 vlan_id, u8 qos)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vsi->info.pvid) {
		/* kill old VLAN */
		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
					       VLAN_VID_MASK));
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "remove VLAN failed, ret=%d, aq_err=%d\n",
				 ret, pf->hw.aq.asq_last_status);
		}
	}

	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi,
				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
	else
		i40e_vlan_stripping_disable(vsi);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter */
		ret = i40e_vsi_add_vlan(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			goto error_pvid;
		}
	}

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}
	ret = 0;

error_pvid:
	return ret;
}
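
/*
 * Assumed usage sketch: `ip link set dev <pf-netdev> vf 0 vlan 100 qos 3`
 * lands here and programs pvid = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT),
 * a VLAN tag the hardware then handles on the VF's behalf; vlan 0 with
 * qos 0 clears the port VLAN and disables stripping instead.
 */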

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @tx_rate: tx rate
 *
 * configure vf tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
{
	return -EOPNOTSUPP;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: vf identifier
 * @ivi: vf configuration structure
 *
 * return vf configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_index];
	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	ivi->vf = vf_id;

	/* first entry of the list is the default ethernet address */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
		break;
	}

	ivi->tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	ret = 0;

error_param:
	return ret;
}
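
/*
 * Note: the reported MAC is the first entry on the VSI filter list (the
 * default address), and tx_rate is hardcoded to 0 because
 * i40e_ndo_set_vf_bw() above returns -EOPNOTSUPP.
 */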