ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
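
/*
 * Constructors/destructors for the driver's MR and MW bookkeeping
 * structures, backed by the mr_cache and mw_cache slab caches above.
 */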
static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mrlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me) {
		spin_lock_init(&me->mwlock);
	} else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
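
/*
 * Return a DMA-capable MR for the given PD. The eHCA driver implements
 * this as a shadow of the internal max-MR, which spans all of kernel
 * memory starting at KERNELBASE; without an internal max-MR, fail.
 */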
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			/* release the unregistered MR */
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exists!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/
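
/*
 * Register an MR over a caller-supplied list of physical buffers.
 * A request that exactly matches the internal max-MR (detected by
 * ehca_mr_is_maxmr()) is registered as a shared max-MR; anything else
 * is registered page by page via ehca_reg_mr().
 */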
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	u64 size;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
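
/*
 * Register a userspace memory region described by an ib_umem. Only a
 * umem page size equal to PAGE_SIZE is supported; the pages are walked
 * through the umem chunk list and registered on the HCA.
 */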
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
			       struct ib_umem *region,
			       int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	int ret;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}
	if (!region) {
		ehca_err(pd->device, "bad input values: region=%p", region);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}
	if (region->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "region->page_size=%x", region->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if ((region->length == 0) ||
	    ((region->virt_base + region->length) < region->virt_base)) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", region->length, region->virt_base);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	/* determine number of MR pages */
	num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	/* register MR on HCA */
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.region = region;
	pginfo.next_4k = region->offset / EHCA_PAGESIZE;
	pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
					       (&region->chunk_list),
					       list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)region->virt_base,
			  region->length, mr_access_flags, e_pd, &pginfo,
			  &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit1;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/
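
/*
 * Re-register a physical MR: address/size (IB_MR_REREG_TRANS), access
 * rights (IB_MR_REREG_ACCESS) and/or protection domain (IB_MR_REREG_PD)
 * may change. Rereg without IB_MR_REREG_TRANS is rejected because the
 * PHYP rereg hCall always needs the page list.
 */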
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start; /* new == old address */
	new_size = e_mr->size;   /* new == old length */
	new_acl = e_mr->acl;     /* new == old access control */
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/
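
/*
 * Query MR attributes from the HCA via hipz_h_query_mr() and translate
 * them back into ib_mr_attr. Not supported for FMRs, and rejected when
 * a user MR is queried from a process other than the PD's owner.
 */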
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}
	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_query_mr(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/
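
/*
 * Deregister an MR and free its HCA resource. FMRs must go through
 * ehca_dealloc_fmr() instead, and the internal max-MR can never be
 * deregistered through this path.
 */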
int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}
	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto dereg_mr_exit0;
	}

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/
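
/*
 * Allocate a memory window on the HCA. Note that binding the window
 * (ehca_bind_mw() below) is not implemented by this driver.
 */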
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca_mrmw_map_hrc_free_mw(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/
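
/*
 * Allocate a fast memory region (FMR). The FMR is registered on the
 * HCA with an empty page list; pages are supplied later through
 * ehca_map_phys_fmr(). Only page sizes of EHCA_PAGESIZE or PAGE_SIZE
 * are accepted.
 */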
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/
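
/*
 * Map a list of physical pages into an FMR. The iova must be aligned
 * to the FMR page size. Internally this is a reregistration of the
 * underlying MR with the new page list.
 */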
int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_pages = list_len;
	pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.page_list = page_list;
	pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
			  EHCA_PAGESIZE);

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
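
/*
 * Unmap a list of FMRs in two passes: first verify that every entry
 * really is an FMR and that all of them belong to the same SHCA, then
 * unmap each one. On the first failure the remaining FMRs are left
 * mapped.
 */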
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/
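
/*
 * Free an FMR on the HCA and delete the driver's bookkeeping structure.
 */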
int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca_mrmw_map_hrc_free_mr(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/
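
/*
 * Register an MR on the HCA: allocate the MR resource via
 * hipz_h_alloc_resource_mr(), then feed the page list to the firmware
 * with ehca_reg_mr_rpages(). If page registration fails, the MR
 * resource is freed again.
 */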
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1) /* register as high-performance MR */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca_mrmw_map_hrc_alloc(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k = pginfo->num_4k;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/
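
/*
 * Register the page list of an MR with the firmware, in chunks of at
 * most 512 4K entries per hCall. Intermediate chunks must return
 * H_PAGE_REGISTERED; the final chunk must return H_SUCCESS.
 */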
int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			rnum = pginfo->num_4k % 512; /* last shot */
			if (rnum == 0)
				rnum = 512;      /* last shot is full */
		} else
			rnum = 512;

		if (rnum > 1) {
			ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf "
					 "bad rc, ret=%x rnum=%x kpage=%p",
					 ret, rnum, kpage);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else {  /* rnum==1 */
			ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
			if (ret) {
				ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
					 "bad rc, ret=%x i=%x", ret, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
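
/*
 * Fast reregistration path: a single hipz_h_reregister_pmr() hCall
 * replaces address, size, ACL, PD and page list in one shot. Returns
 * -EAGAIN if the firmware rejects the request (e.g. H_MR_CONDITION:
 * MW bound or MR shared) so the caller can fall back to the slow
 * deregister/re-register path.
 */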
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k = pginfo->num_4k;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/
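
/*
 * Reregister an MR. The single-hCall path (Rereg1) is used when both
 * the old and new page lists fit into one firmware control block
 * (<= 512 4K pages), the new list is not larger than the old one, and
 * the MR is not the max-MR; otherwise, or when Rereg1 fails with
 * -EAGAIN, fall back to deregistering and registering anew (Rereg3).
 */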
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
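
/*
 * Unmap a single FMR. Preferably a rereg hCall with len=0 detaches the
 * pages in one shot; FMRs with more than 512 pages, or a failed rereg
 * hCall, are handled by freeing the MR and registering it anew with an
 * empty FMR page list.
 */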
int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* first check if reregistration hCall can be used for unmap */
	if (e_fmr->fmr_max_pages > 512) {
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (rereg_1_hcall) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret != H_SUCCESS) {
			/*
			 * should not happen, because length checked above,
			 * FMRs are not shared and no MW bound to FMRs
			 */
			ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
				 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
				 "mr_hndl=%lx lkey=%x lkey_out=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
			rereg_3_hcall = 1;
		} else {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
		}
	}

	if (rereg_3_hcall) {
		/* first free old FMR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
				 "lkey=%x",
				 h_ret, e_fmr, shca->ipz_hca_handle.handle,
				 e_fmr->ipz_mr_handle.handle,
				 e_fmr->ib.ib_fmr.lkey);
			ret = ehca_mrmw_map_hrc_free_mr(h_ret);
			goto ehca_unmap_one_fmr_exit0;
		}
		/* clean ehca_mr_t, without changing lock */
		save_fmr = *e_fmr;
		ehca_mr_deletenew(e_fmr);

		/* set some MR values */
		e_fmr->flags = save_fmr.flags;
		e_fmr->fmr_page_size = save_fmr.fmr_page_size;
		e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
		e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
		e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
		e_fmr->acl = save_fmr.acl;

		pginfo.type = EHCA_MR_PGI_FMR;
		pginfo.num_pages = 0;
		pginfo.num_4k = 0;
		ret = ehca_reg_mr(shca, e_fmr, NULL,
				  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
				  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
				  &tmp_rkey);
		if (ret) {
			/* restore MR values from the saved copy */
			u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
			memcpy(&e_fmr->flags, &save_fmr.flags,
			       sizeof(struct ehca_mr) - offset);
			goto ehca_unmap_one_fmr_exit0;
		}
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/
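
/*
 * Register a shared MR (SMR) that aliases an existing MR's pages with
 * its own key pair and access rights.
 */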
int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0}, 0, 0, 0, 0, 0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_pages = e_origmr->num_pages;
	e_newmr->num_4k = e_origmr->num_4k;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/
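/*
 * Note: the internal max-MR spans the kernel's entire linear mapping.
 * Its size runs from PAGE_OFFSET up to high_memory, its I/O virtual
 * address starts at KERNELBASE, and a single physical buffer entry
 * beginning at address 0 describes the whole region.
 */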
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo = {0, 0, 0, 0, 0, 0, 0, NULL, 0, NULL,
					NULL, 0, NULL, 0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.num_phys_buf = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/
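/*
 * Register e_newmr as a shared MR on top of the SHCA's internal max-MR
 * (shca->maxmr); size and page counts are inherited from the original.
 */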
int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0}, 0, 0, 0, 0, 0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca_mrmw_map_hrc_reg_smr(h_ret);
	}
	/* successful registration */
	e_newmr->num_pages = e_origmr->num_pages;
	e_newmr->num_4k = e_origmr->num_4k;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/
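/*
 * Deregister the SHCA's internal max-MR. The maxmr pointer is cleared
 * before the MR is freed and restored if deregistration fails; the PD
 * use count is dropped only after the MR is really gone.
 */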
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/
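/*
 * Layout rules enforced below: the first buffer must share its in-page
 * offset with iova_start and, unless it is the only buffer, must end on
 * a page boundary; every later buffer must start page-aligned; all but
 * the first and last buffers must cover whole pages; no buffer may be
 * empty.
 */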
/*
 * check physical buffer array of MR verbs for validity and
 * calculate MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) && /* not 1st */
		     (i < (num_phys_buf - 1)) && /* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/
/* check page list of map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/
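/*
 * The pginfo cursors (next_buf/next_chunk/next_listelem, next_nmap,
 * next_4k) carry state across calls, so successive calls emit
 * consecutive 4K HCA pages from a phys_buf_array (EHCA_MR_PGI_PHYS),
 * a user-memory chunk list (EHCA_MR_PGI_USER) or an FMR page list
 * (EHCA_MR_PGI_FMR).
 */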
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;
	u32 j = 0;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf = pginfo->phys_buf_array + pginfo->next_buf;
			num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				/* sanity check */
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				if (!(*kpage) && pbuf->addr) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number)
					break;
			}
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = (page_to_pfn(chunk->page_list[i].page)
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number)
					break;
			}
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if (!(*kpage)) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/
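/*
 * Single-page variant of ehca_set_pagebuf(): emits exactly one 4K page
 * per call and advances the same pginfo cursors.
 */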
/* setup 1 page from page info page buffer */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		if (!(*rpage) && tmp_pbuf->addr) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = (page_to_pfn(chunk->page_list[
						      pginfo->next_nmap].page)
				  << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if (!(*rpage)) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if (!(*rpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */

/*----------------------------------------------------------------------*/
/*
 * check whether an MR is a max-MR, i.e. one that spans all of memory;
 * returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as max-MR only if it matches the following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/
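/*
 * ehca_mrmw_map_acl() and ehca_mrmw_reverse_map_acl() translate
 * bit-for-bit between IB verbs access flags (IB_ACCESS_*) and HIPZ
 * access-control bits (HIPZ_ACCESSCTRL_*).
 */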
/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/
/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/
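/*
 * The ehca_mrmw_map_hrc_* helpers below translate hypervisor return
 * codes (H_*) of the individual hcalls into errno values; codes not
 * explicitly listed map conservatively to -EINVAL.
 */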
/*
 * map HIPZ rc to IB retcodes for MR/MW allocations
 * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
 */
int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:              /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RT_PARM:              /* invalid resource type */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
	case H_MLENGTH_PARM:         /* invalid memory length */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_CONSTRAINED:          /* resource constraint */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_alloc() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering the last page
 */
int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:         /* registration complete */
		return 0;
	case H_PAGE_REGISTERED: /* page registered */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater than 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_last() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for MR register rpage
 * Used for hipz_h_register_rpage_mr at registering one page, but not
 * the last page
 */
int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_PAGE_REGISTERED: /* page registered */
		return 0;
	case H_SUCCESS:         /* registration complete */
	case H_ADAPTER_PARM:    /* invalid adapter handle */
	case H_RH_PARM:         /* invalid resource handle */
/*	case H_QT_PARM:            invalid queue type */
	case H_PARAMETER:       /*
				 * invalid logical address,
				 * or count zero or greater than 512
				 */
	case H_TABLE_FULL:      /* page table full */
	case H_HARDWARE:        /* HCA not operational */
		return -EINVAL;
	case H_BUSY:            /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
/*----------------------------------------------------------------------*/

/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* successful completion */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
		return -EINVAL;
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_query_mr() */

/*----------------------------------------------------------------------*/
/*
 * map HIPZ rc to IB retcodes for freeing MR resource
 * Used for hipz_h_free_resource_mr
 */
int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mr() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for freeing MW resource
 * Used for hipz_h_free_resource_mw
 */
int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:      /* resource freed */
		return 0;
	case H_ADAPTER_PARM: /* invalid adapter handle */
	case H_RH_PARM:      /* invalid resource handle */
	case H_R_STATE:      /* invalid resource state */
	case H_HARDWARE:     /* HCA not operational */
		return -EINVAL;
	case H_RESOURCE:     /* resource in use */
	case H_BUSY:         /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_free_mw() */

/*----------------------------------------------------------------------*/

/*
 * map HIPZ rc to IB retcodes for SMR registrations
 * Used for hipz_h_register_smr.
 */
int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
{
	switch (hipz_rc) {
	case H_SUCCESS:              /* successful completion */
		return 0;
	case H_ADAPTER_PARM:         /* invalid adapter handle */
	case H_RH_PARM:              /* invalid resource handle */
	case H_MEM_PARM:             /* invalid MR virtual address */
	case H_MEM_ACCESS_PARM:      /* invalid access controls */
	case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
		return -EINVAL;
	case H_BUSY:                 /* long busy */
		return -EBUSY;
	default:
		return -EINVAL;
	}
} /* end ehca_mrmw_map_hrc_reg_smr() */

/*----------------------------------------------------------------------*/
/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags         = 0;
	mr->num_pages     = 0;
	mr->num_4k        = 0;
	mr->acl           = 0;
	mr->start         = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps  = 0;
	mr->fmr_map_cnt   = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
	mr->nr_of_pages   = 0;
	mr->pagearray     = NULL;
} /* end ehca_mr_deletenew() */
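/*
 * Create the slab caches for ehca_mr and ehca_mw objects; if the MW
 * cache cannot be created, the MR cache is torn down again.
 */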
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}
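/* destroy the MR/MW slab caches created by ehca_init_mrmw_cache() */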
void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}