resource_tracker.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
#define ETH_ALEN		6

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	u32 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

static inline const char *qp_states_str(enum res_qp_states state)
{
	switch (state) {
	case RES_QP_BUSY: return "RES_QP_BUSY";
	case RES_QP_RESERVED: return "RES_QP_RESERVED";
	case RES_QP_MAPPED: return "RES_QP_MAPPED";
	case RES_QP_HW: return "RES_QP_HW";
	default: return "Unknown";
	}
}

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

static inline const char *srq_states_str(enum res_srq_states state)
{
	switch (state) {
	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
	case RES_SRQ_HW: return "RES_SRQ_HW";
	default: return "Unknown";
	}
}

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

static inline const char *counter_states_str(enum res_counter_states state)
{
	switch (state) {
	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_counter {
	struct res_common com;
	int port;
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	default: return "Unknown resource type !!!";
	};
}
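
/*
 * The tracker keeps one linked list of resources per (slave, resource type)
 * pair, plus one radix tree per resource type that maps a resource ID to its
 * tracking entry. Both are set up here and torn down in
 * mlx4_free_resource_tracker().
 */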
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
				GFP_ATOMIC|__GFP_NOWARN);

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		for (i = 0; i < dev->num_slaves; i++)
			mlx4_delete_all_resources_for_slave(dev, i);

		kfree(priv->mfunc.master.res_tracker.slave_list);
	}
}

static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				 res_id);
}
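
/*
 * get_res()/put_res() bracket temporary use of a tracked resource: get_res()
 * verifies that the calling slave owns the entry, saves its current state in
 * from_state and marks it RES_ANY_BUSY under the tracker lock; put_res()
 * restores the saved state.
 */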
static int get_res(struct mlx4_dev *dev, int slave, int res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%x to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    int res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock_irq(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, int res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;

	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
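
/*
 * add_res_range() registers a contiguous range of resource IDs as owned by
 * one slave: every ID gets a freshly allocated tracking entry, is inserted
 * into the per-type radix tree and appended to the slave's per-type list.
 * rem_res_range() below is the inverse.
 */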
static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct radix_tree_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = radix_tree_insert(root, base + i, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* roll back only the entries that were already inserted */
	for (--i; i >= 0; --i)
		radix_tree_delete(&tracker->res_tree[type], base + i);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = radix_tree_lookup(&tracker->res_tree[type], i);
		radix_tree_delete(&tracker->res_tree[type], i);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
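
/*
 * The *_res_start_move_to() helpers implement a small per-resource state
 * machine: they validate that the requested transition is legal for the
 * entry's current state, then park the entry in its BUSY state with
 * from_state and to_state recorded. res_end_move() commits the transition,
 * res_abort_move() rolls it back.
 */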
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = (struct res_qp *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = (struct res_mpt *)r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = radix_tree_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
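
/*
 * The *_alloc_res() handlers below service the ALLOC_RES command on behalf
 * of a slave: they call the master's __mlx4_* allocators, record the new IDs
 * in the tracker via add_res_range(), and for ICM-mapping opcodes drive the
 * tracked entry through its state machine.
 */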
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_seg(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_seg) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0xffffff;
}

static int qp_get_mtt_seg(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_seg(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
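
/*
 * qp_get_mtt_size() derives how much MTT space a QP context needs from the
 * SQ/RQ log sizes and strides encoded in the context; QPs attached to an SRQ
 * or using RSS/XRC have no RQ buffer, so only the SQ is counted.
 */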
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int qp_get_pdn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->pd) & 0xffffff;
}

static int pdn2slave(int pdn)
{
	return (pdn >> NOT_MASKED_PD_BITS) - 1;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id * dev->caps.mtts_per_seg;
	int res_size = (1 << mtt->order) * dev->caps.mtts_per_seg;

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
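
/*
 * The command wrappers from here on validate a slave's mailbox contents
 * against the tracker before passing the command to firmware:
 * mlx4_SW2HW_MPT_wrapper checks that the referenced MTT range and PD really
 * belong to the slave, takes an MTT reference for non-physical MRs, and only
 * then forwards the command via mlx4_DMA_wrapper().
 */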
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = (mr_get_mtt_seg(inbox->buf) / dev->caps.mtt_entry_sz) *
		dev->caps.mtts_per_seg;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
			      RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
		err = -EPERM;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
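
/*
 * mlx4_RST2INIT_QP_wrapper() moves a tracked QP to RES_QP_HW: it looks up
 * and pins the MTT, receive/send CQs and (optionally) the SRQ named in the
 * QP context, forwards the command to firmware, and on success bumps their
 * reference counts so they cannot be destroyed while the QP exists.
 */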
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = (qp_get_mtt_seg(qpc) / dev->caps.mtt_entry_sz) *
		       dev->caps.mtts_per_seg;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(qp_get_pdn(qpc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;

	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_seg(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_seg(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
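
/*
 * SW2HW_EQ wrapper: EQs are tracked per slave, so the resource id is
 * (slave << 8) | eqn.  The tracker entry is created on demand here, the
 * referenced MTT range is validated, and a reference on that MTT is taken
 * only once the firmware command succeeds.
 */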
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = (eq_get_mtt_seg(eqc) / dev->caps.mtt_entry_sz) *
		       dev->caps.mtts_per_seg;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
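
/*
 * WRITE_MTT wrapper: the slave's page list is validated against an MTT
 * range it owns (get_containing_mtt() marks the range busy for the
 * duration), and the write is then performed through the master's software
 * path (__mlx4_write_mtt) rather than by forwarding the mailbox to firmware.
 */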
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.first_seg = 0;
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
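
/*
 * mlx4_GEN_EQE: generate an event on a slave's event queue on its behalf.
 * The event is posted only if the slave has registered for this event type
 * and its EQ is currently in the hardware-owned state; the per-slave mutex
 * serializes GEN_EQE commands toward the firmware.
 */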
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq;

	/* Create the event only if the slave is registered */
	if ((event_eq->event_type & (1 << eqe->type)) == 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		       dev->caps.mtts_per_seg;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;

	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
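
/*
 * handle_resize: used by MODIFY_CQ when op_modifier is 0 (resize) to swap
 * the CQ's MTT reference from the original range to the range described in
 * the new CQ context, but only after firmware has accepted the command.
 */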
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = (cq_get_mtt_seg(cqc) / dev->caps.mtt_entry_sz) *
		       dev->caps.mtts_per_seg;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg, RES_MTT,
		      &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;

	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		/* Resize: handle_resize() already forwards the command to
		 * firmware, so do not issue it a second time below. */
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

static int srq_get_pdn(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->pd) & 0xffffff;
}

static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
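
/*
 * SW2HW_SRQ wrapper: the SRQ number encoded in the context must match the
 * in_modifier, the referenced MTT range and PD must belong to the slave,
 * and the MTT reference is taken only once firmware accepts the SRQ.
 */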
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = (srq_get_mtt_seg(srqc) / dev->caps.mtt_entry_sz) *
		       dev->caps.mtts_per_seg;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;

	err = get_res(dev, slave, mtt_base / dev->caps.mtts_per_seg,
		      RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	if (pdn2slave(srq_get_pdn(srqc)) != slave) {
		err = -EPERM;
		goto ex_put_mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;

	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);
	return err;
}

static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
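
/*
 * QP_ATTACH wrapper: attach or detach a multicast GID to/from a slave's QP
 * (op_modifier selects which).  The GID is recorded in the QP's mcg_list so
 * that it can be detached automatically if the slave goes away while still
 * attached; a failed attach removes the record again.
 */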
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err, err1;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = gid[7] & steer_type_mask;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};

int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	int err;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					    MLX4_MC_STEER);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
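
/*
 * Slave cleanup path.  _move_all_busy() marks every resource of the given
 * type owned by the slave as busy/removing so that no new command can grab
 * it; move_all_busy() retries this for up to five seconds before giving up
 * and printing which resources are still busy.
 */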
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%x is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
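
/*
 * rem_slave_qps: walk the slave's QP list and release whatever is left.
 * Each QP is driven back down its state ladder: HW -> issue 2RST and drop
 * the CQ/SRQ/MTT references, MAPPED -> free the ICM mapping, RESERVED ->
 * remove it from the tracker.  The other rem_slave_*() helpers below follow
 * the same pattern for their resource types.
 */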
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_QP],
							  qp->com.res_id);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_SRQ],
							  srqn);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_CQ],
							  cqn);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MPT],
							  mptn);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_MTT],
							  base);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					radix_tree_delete(&tracker->res_tree[RES_EQ],
							  eqn);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					if (!err) {
						atomic_dec(&eq->mtt->ref_count);
						state = RES_EQ_RESERVED;
					}
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}
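
/*
 * mlx4_delete_all_resources_for_slave: called when a slave function goes
 * away; releases everything it still owns, under the per-slave mutex.  MTT
 * ranges are released last, after the QPs, SRQs, CQs, MRs and EQs that hold
 * references to them.
 */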
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	/*VLAN*/
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}