resource_tracker.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};
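
/*
 * Every tracked resource embeds a res_common.  The tracker keys it by
 * res_id in a per-type red-black tree and also links it into the list
 * of resources owned by a given slave, so an entry can be found both
 * by id and by owner.
 */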
struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
};
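
/*
 * The per-type trees are plain <linux/rbtree.h> rbtrees keyed by
 * res_id; lookup and insert are the textbook walks.  Callers are
 * expected to hold the tracker spinlock (mlx4_tlock).
 */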
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);
		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}

	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);
		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
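
/*
 * Tear-down is split by mlx4_res_tracker_free_type: STRUCTS_ONLY frees
 * just the per-slave bookkeeping array, SLAVES_ONLY releases the
 * slaves' resources but keeps the array, and FREE_ALL does both,
 * including the master's own function.
 */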
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;

	mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
		 "new pkey index = %d\n", port, orig_index, new_index);
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
	}

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
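
/*
 * get_res() marks a resource busy on behalf of a slave so nobody else
 * can grab or move it until the matching put_res().  A sketch of the
 * usual calling pattern (see e.g. mpt_free_res() below):
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... use mpt while it is protected from concurrent changes ...
 *	put_res(dev, slave, id, RES_MPT);
 */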
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
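
/*
 * add_res_range() allocates tracker entries for [base, base + count)
 * outside the lock, then inserts them into the type's rbtree and the
 * owning slave's list under the tracker lock; any collision or insert
 * failure unwinds everything inserted so far.
 */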
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i indexes res_arr, so unwind to 0 (not base), and also unlink
	 * the entries already added to the slave's list. */
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	/* Check the EQ states, not the (copy-pasted) MPT ones. */
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
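
/*
 * The *_res_start_move_to() helpers implement a small state machine:
 * they validate that the requested transition is legal from the current
 * state, remember from_state/to_state, and park the resource in the
 * type's BUSY state.  The caller then either commits the transition
 * with res_end_move() or rolls it back with res_abort_move().
 */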
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
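
/*
 * For RES_OP_RESERVE the 64-bit in_param packs the request in two
 * halves: get_param_l() yields the count and get_param_h() the
 * alignment.  The base of the range that was actually reserved is
 * passed back to the slave through the low half of out_param.
 */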
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
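
/*
 * VHCR dispatch for resource allocation: the resource type arrives in
 * vhcr->in_modifier and the operation (RES_OP_*) in vhcr->op_modifier;
 * results travel back to the slave in vhcr->out_param.
 */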
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}
  1406. static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1407. u64 in_param, u64 *out_param)
  1408. {
  1409. int index;
  1410. int err;
  1411. if (op != RES_OP_RESERVE)
  1412. return -EINVAL;
  1413. index = get_param_l(&in_param);
  1414. err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
  1415. if (err)
  1416. return err;
  1417. __mlx4_counter_free(dev, index);
  1418. return err;
  1419. }
  1420. static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
  1421. u64 in_param, u64 *out_param)
  1422. {
  1423. int xrcdn;
  1424. int err;
  1425. if (op != RES_OP_RESERVE)
  1426. return -EINVAL;
  1427. xrcdn = get_param_l(&in_param);
  1428. err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
  1429. if (err)
  1430. return err;
  1431. __mlx4_xrcd_free(dev, xrcdn);
  1432. return err;
  1433. }
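/*
 * FREE_RES dispatcher: vhcr->in_modifier selects the resource type, and the
 * per-type handler both updates the tracker and releases the underlying
 * device resource.
 */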
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

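/*
 * A slave may only reference MTT entries that lie entirely inside one of
 * the ranges it reserved earlier; anything else is a permission error.
 */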
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

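/*
 * SW2HW_MPT moves an MPT to hardware ownership. For non-physical MRs the
 * MTT range named in the entry is validated and pinned (ref_count) before
 * the command is forwarded to firmware through the DMA wrapper.
 */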
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

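/*
 * RST2INIT is where a QP first reaches hardware: validate and pin the MTT
 * range, the receive/send CQs and (optionally) the SRQ named in the QP
 * context before forwarding the command. The references taken here are
 * dropped again on the 2RST transition.
 */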
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

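/*
 * Find the tracked MTT reservation that fully contains [start, start+len)
 * and mark it busy so it cannot go away while the caller writes to it; the
 * caller releases it again with put_res().
 */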
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

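/*
 * Sanity-check RC/UC transition parameters. Slaves own only gid index 0,
 * so a slave-supplied primary path may not select another mgid_index; a
 * non-zero alternate-path mgid_index is rejected as well.
 */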
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev))
				/* slaves have only gid index 0 */
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
					if (qp_ctx->pri_path.mgid_index)
						return -EINVAL;
			if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
				if (qp_ctx->alt_path.mgid_index)
					return -EINVAL;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}

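/*
 * WRITE_MTT on behalf of a slave: the addressed range must fall inside one
 * of the slave's MTT reservations; the inbox page list is converted to
 * host-endian addresses and written through the software path.
 */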
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

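/*
 * Inject an event on the EQ a slave registered for this event type (a
 * negative eqn means it never registered, which is not an error). GEN_EQE
 * commands for a given slave are serialized by gen_eqe_mutex.
 */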
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

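/*
 * CQ resize (MODIFY_CQ with op_modifier 0): swap the pinned MTT range for
 * the one named in the new CQ context, keeping the reference counts
 * balanced whether the firmware command succeeds or fails.
 */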
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
	if (err)
		return err;

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
	if (err)
		return err;

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

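/*
 * Multicast attachments are recorded per QP on rqp->mcg_list, keyed by raw
 * GID, so they can be rolled back on error and detached when the slave
 * goes away.
 */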
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

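/*
 * QP_ATTACH/DETACH wrapper: update the QP's mcg_list before touching the
 * hardware so that a failed attach can be undone and detach_qp() can later
 * clean up whatever is still attached.
 */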
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}

/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't a multicast or broadcast MAC */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}

/*
 * In case of a missing eth header, append an eth header with a MAC address
 * assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}

int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	int err;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist))
			return -EINVAL;
		break;
	case MLX4_NET_TRANS_RULE_ID_IB:
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id))
			return -EINVAL;
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		return -EINVAL;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
	}
	return err;
}

int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		return err;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

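/* Detach every multicast group still attached to a dying slave's QP. */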
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}

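/*
 * Mark all of a slave's resources of one type as busy/removing so no new
 * operation can grab them; the return value counts those that were already
 * busy. move_all_busy() below retries for up to five seconds.
 */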
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}

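/*
 * The rem_slave_* helpers below walk a slave's tracked resources and drive
 * each one back through its state machine (HW -> mapped -> reserved) just
 * as the slave itself would have, then free the tracker entries.
 */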
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed "
							 "to move slave %d qpn %d to "
							 "reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed "
							 "to move slave %d srq %d to "
							 "SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed "
							 "to move slave %d cq %d to "
							 "SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed "
							 "to move slave %d mpt %d to "
							 "SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs_rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						/* retry the allocation */
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
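
/*
 * Reclaim all counters still owned by @slave and return them to the
 * master's counter allocator.
 */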
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
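
/*
 * Reclaim all XRC domains still owned by @slave and return them to the
 * master's XRCD allocator.
 */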
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
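
/*
 * Free every resource the given slave still owns, typically after the
 * slave (VF) resets or shuts down. The per-slave mutex serializes this
 * teardown against concurrent allocations by the same slave. Resource
 * types are released in an order intended to respect inter-resource
 * dependencies; in particular, MTTs are freed only after the QPs, SRQs,
 * CQs, MRs and EQs that reference them.
 */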
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	rem_slave_fs_rule(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}