ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>
#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))

/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512
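
/*
 * Example (illustrative, not from the original source): NUM_CHUNKS rounds
 * up, so covering a 0x2100 byte region with 0x1000 byte chunks needs
 * NUM_CHUNKS(0x2100, 0x1000) == 3 chunks: two full chunks plus a partial
 * third one.
 */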

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exists!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
					 size, EHCA_PAGESIZE);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
				      EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
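
/*
 * The same access-flag rule is enforced in ehca_reg_phys_mr(),
 * ehca_reg_user_mr(), ehca_rereg_phys_mr() and ehca_alloc_fmr(): remote
 * write and remote atomic access both require local write access. A
 * minimal sketch of how the repeated check could be expressed as one
 * predicate (hypothetical helper, not part of this driver):
 *
 *	static inline int ehca_acl_lacks_local_write(int mr_access_flags)
 *	{
 *		return (mr_access_flags &
 *			(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
 *		       !(mr_access_flags & IB_ACCESS_LOCAL_WRITE);
 *	}
 */

/*----------------------------------------------------------------------*/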

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret;
	u32 num_kpages;
	u32 num_hwpages;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}
	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
				 EHCA_PAGESIZE);

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
					 new_size, EHCA_PAGESIZE);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
				      EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size - 1)) /
			      EHCA_PAGESIZE);
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
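
/*
 * Worked example for the hw-page arithmetic above (illustrative values,
 * assuming EHCA_PAGESIZE is 4K and the FMR was allocated with a 64K page
 * size): each FMR page then covers 64K/4K = 16 hw pages, so list_len = 8
 * gives num_hwpages = 8 * 16 = 128. Since iova must be fmr_page_size
 * aligned (checked above), (iova & (fmr_page_size - 1)) is 0 and
 * next_hwpage starts at 0.
 */

/*----------------------------------------------------------------------*/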

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%x rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
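
/*
 * Example of the rpage chunking above (illustrative): for
 * pginfo->num_hwpages = 1300 the loop runs NUM_CHUNKS(1300, 512) = 3
 * times with rnum = 512, 512 and finally 1300 % 512 = 276. Only the last
 * hcall may return H_SUCCESS ('registration complete'); every earlier
 * shot must return H_PAGE_REGISTERED.
 */

/*----------------------------------------------------------------------*/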

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
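
/*
 * Summary of the strategy above: the single-hcall path (Rereg1,
 * hipz_h_reregister_pmr) is used only when the page lists fit into one
 * control block (both old and new MR <= MAX_RPAGES hw pages), the MR does
 * not grow, and the MR is not the internal max-MR. Otherwise, or when
 * Rereg1 returns -EAGAIN (e.g. H_MR_CONDITION: MW bound or MR shared),
 * the MR is freed and re-registered via the 3-hcall path (Rereg3): free
 * resource, alloc resource, register rpages.
 */

/*----------------------------------------------------------------------*/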

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
			 "mr_hndl=%lx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = 0;
	pginfo.num_hwpages = 0;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_fmr.flags),
		       sizeof(struct ehca_mr) - offset);
		goto ehca_unmap_one_fmr_exit0;
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/
  1287. /* register internal max-MR to internal SHCA */
  1288. int ehca_reg_internal_maxmr(
  1289. struct ehca_shca *shca,
  1290. struct ehca_pd *e_pd,
  1291. struct ehca_mr **e_maxmr) /*OUT*/
  1292. {
  1293. int ret;
  1294. struct ehca_mr *e_mr;
  1295. u64 *iova_start;
  1296. u64 size_maxmr;
  1297. struct ehca_mr_pginfo pginfo;
  1298. struct ib_phys_buf ib_pbuf;
  1299. u32 num_kpages;
  1300. u32 num_hwpages;
  1301. e_mr = ehca_mr_new();
  1302. if (!e_mr) {
  1303. ehca_err(&shca->ib_device, "out of memory");
  1304. ret = -ENOMEM;
  1305. goto ehca_reg_internal_maxmr_exit0;
  1306. }
  1307. e_mr->flags |= EHCA_MR_FLAG_MAXMR;
  1308. /* register internal max-MR on HCA */
  1309. size_maxmr = (u64)high_memory - PAGE_OFFSET;
  1310. iova_start = (u64 *)KERNELBASE;
  1311. ib_pbuf.addr = 0;
  1312. ib_pbuf.size = size_maxmr;
  1313. num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
  1314. PAGE_SIZE);
  1315. num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
  1316. EHCA_PAGESIZE);
  1317. memset(&pginfo, 0, sizeof(pginfo));
  1318. pginfo.type = EHCA_MR_PGI_PHYS;
  1319. pginfo.num_kpages = num_kpages;
  1320. pginfo.num_hwpages = num_hwpages;
  1321. pginfo.u.phy.num_phys_buf = 1;
  1322. pginfo.u.phy.phys_buf_array = &ib_pbuf;
  1323. ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
  1324. &pginfo, &e_mr->ib.ib_mr.lkey,
  1325. &e_mr->ib.ib_mr.rkey);
  1326. if (ret) {
  1327. ehca_err(&shca->ib_device, "reg of internal max MR failed, "
  1328. "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
  1329. "num_hwpages=%x", e_mr, iova_start, size_maxmr,
  1330. num_kpages, num_hwpages);
  1331. goto ehca_reg_internal_maxmr_exit1;
  1332. }
  1333. /* successful registration of all pages */
  1334. e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
  1335. e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
  1336. e_mr->ib.ib_mr.uobject = NULL;
  1337. atomic_inc(&(e_pd->ib_pd.usecnt));
  1338. atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
  1339. *e_maxmr = e_mr;
  1340. return 0;
  1341. ehca_reg_internal_maxmr_exit1:
  1342. ehca_mr_delete(e_mr);
  1343. ehca_reg_internal_maxmr_exit0:
  1344. if (ret)
  1345. ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
  1346. ret, shca, e_pd, e_maxmr);
  1347. return ret;
  1348. } /* end ehca_reg_internal_maxmr() */
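
/*
 * Worked example (illustrative; assumes NUM_CHUNKS(len, sz) rounds up,
 * i.e. (len + sz - 1) / sz, and 4 KiB pages on both kernel and HW
 * side): for a 1 GiB max-MR with page-aligned iova_start,
 *
 *	num_kpages  = NUM_CHUNKS(0 + 0x40000000, 0x1000) = 0x40000
 *	num_hwpages = NUM_CHUNKS(0 + 0x40000000, 0x1000) = 0x40000
 *
 * The (iova_start % PAGE_SIZE) term only matters for an unaligned
 * start, where it accounts for the partial leading page.
 */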
/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL;	/* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */
/*----------------------------------------------------------------------*/

/*
 * check the physical buffer array of the MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
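
/*
 * Example (illustrative only, assuming 4 KiB pages): the two-element
 * array { addr=0x1000, size=0x1000 }, { addr=0x2000, size=0x800 }
 * passes: iova_start must share the first buffer's in-page offset, the
 * first buffer ends on a page boundary (required when more buffers
 * follow), later buffers start page aligned, and only the last may end
 * mid-page; *size comes back as 0x1800.
 */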
/*----------------------------------------------------------------------*/

/* check the page list of the map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */
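
/*
 * Example (illustrative only): for fmr_page_size == 4096 a page list
 * entry of 0x10000 passes the check, while 0x10800 fails with -EINVAL
 * because it is not a multiple of the FMR page size.
 */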
/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= EHCA_PAGESIZE, i.e. each kernel page holds a whole
 * number of HW pages */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;

	/* loop over desired chunk entries */
	chunk = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(chunk->page_list[i].page)
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      EHCA_PAGESIZE));
			if (!(*kpage)) {
				ehca_gen_err("pgaddr=%lx "
					     "chunk->page_list[i]=%lx "
					     "i=%x next_hwpage=%lx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
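
/*
 * Note (added for clarity): the walk is resumable -- next_chunk,
 * next_nmap and next_hwpage record where the previous call stopped, so
 * successive calls with small 'number' values emit the HW page
 * addresses of a large umem region in order, PAGE_SIZE / EHCA_PAGESIZE
 * HW pages per kernel page.
 */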
int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
			  u32 number,
			  u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
				    pbuf->size, EHCA_PAGESIZE);
		offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%lx num_kpages=%lx "
					     "hwpage_cnt=%lx "
					     "num_hwpages=%lx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & EHCA_PAGEMASK)
				+ (pginfo->next_hwpage * EHCA_PAGESIZE));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%lx "
					     "pbuf->size=%lx "
					     "next_hwpage=%lx", pbuf->addr,
					     pbuf->size,
					     pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (pginfo->next_hwpage %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
				(pginfo->kpage_cnt)++;
			kpage++;
			i++;
			if (i >= number)
				break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
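
/*
 * Worked example (illustrative; assumes EHCA_PAGESIZE == 4096,
 * EHCA_PAGEMASK == ~(EHCA_PAGESIZE - 1) and NUM_CHUNKS() rounding up):
 * for pbuf->addr = 0x1800 and pbuf->size = 0x2000 with 4 KiB kernel
 * pages,
 *
 *	num_hw  = NUM_CHUNKS(0x800 + 0x2000, 0x1000) = 3
 *	offs_hw = (0x1800 & ~PAGE_MASK) / 0x1000    = 0
 *
 * so the three HW page addresses 0x1000, 0x2000 and 0x3000 are emitted
 * before next_buf advances and next_hwpage is reset to 0.
 */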
int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
			 u32 number,
			 u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_hwpage * EHCA_PAGESIZE);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_hwpage=%lx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		(pginfo->next_hwpage)++;
		kpage++;
		if (pginfo->next_hwpage %
		    (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) {
			(pginfo->kpage_cnt)++;
			(pginfo->u.fmr.next_listelem)++;
			fmrlist++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
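
/*
 * Note (added for clarity): each FMR page list entry is expanded into
 * fmr_pgsize / EHCA_PAGESIZE consecutive HW pages; with fmr_pgsize ==
 * EHCA_PAGESIZE the inner condition is always true and the list is
 * copied one entry per HW page.
 */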
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = ehca_set_pagebuf_user1(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */
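
#if 0
/*
 * Caller sketch (illustrative only, not part of the driver; the helper
 * name, buffer size and loop structure are assumptions): registration
 * paths fill a kpage buffer in slices until every HW page of the
 * region has been emitted, handing each slice to the firmware.
 */
static int example_consume_pages(struct ehca_mr_pginfo *pginfo)
{
	u64 kpage_buf[64];
	int ret;

	while (pginfo->hwpage_cnt < pginfo->num_hwpages) {
		u32 chunk = min_t(u64, 64,
				  pginfo->num_hwpages - pginfo->hwpage_cnt);

		ret = ehca_set_pagebuf(pginfo, chunk, kpage_buf);
		if (ret)
			return ret;
		/* ... pass kpage_buf[0..chunk-1] to the rpage
		 * registration call here ... */
	}
	return 0;
}
#endif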
/*----------------------------------------------------------------------*/

/*
 * check whether an MR covers the whole of kernel memory, i.e. is a
 * max-MR; returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as a max-MR only if it matches the following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */
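
/*
 * Note (added for clarity): the two conditions mirror exactly the size
 * and iova_start that ehca_reg_internal_maxmr() above registers with,
 * so callers can route such an MR to ehca_reg_maxmr() instead of
 * building full page tables from scratch.
 */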
/*----------------------------------------------------------------------*/

/* map ib access control to hipz access control; used for both MR and MW */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */
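
/*
 * Example (illustrative only): ib_acl == IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ yields *hipz_acl == HIPZ_ACCESSCTRL_L_WRITE |
 * HIPZ_ACCESSCTRL_R_READ; any other ib_acl bits are silently dropped.
 */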
/*----------------------------------------------------------------------*/

/* set the page size in the hipz access control for MR/MW */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	/* nothing to do: the HCA supports only 4k pages */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */
/*----------------------------------------------------------------------*/

/* reverse-map hipz access control to ib access control; used for both
 * MR and MW */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */
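
/*
 * Note (added for clarity): for the five bits handled above the two
 * routines are exact inverses, i.e. reverse-mapping the result of
 * ehca_mrmw_map_acl() recovers the original ib_acl mask.
 */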
/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor,
 * used by the reregister MR verb: resets all fields of struct ehca_mr
 * to 0, except the embedded struct ib_mr and the spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}
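
/*
 * Note (added for clarity): if the second cache cannot be created, the
 * first one is destroyed again so that a failed init leaves no state
 * behind; the matching teardown below is safe either way because it
 * checks each pointer before destroying it.
 */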
void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}