ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))
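/*
 * Example: NUM_CHUNKS(0x1234, 0x1000) = (0x1234 + 0xfff) / 0x1000 = 2,
 * i.e. a buffer of 0x1234 bytes needs two 4K chunks (round up to full
 * chunks).
 */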
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

extern int ehca_mr_largepage;
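/*
 * Worked example for the encoding below: pgsize = 64K = 0x10000 is
 * shifted down by 12 to 0x10; one loop iteration (shift by 4) reaches 1,
 * so the hw code is 1. Likewise 4K -> 0, 1M -> 2, 16M -> 3.
 */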
static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	u32 idx = 0;

	pgsize >>= 12;
	/*
	 * map mr page size into hw code:
	 * 0, 1, 2, 3 for 4K, 64K, 1M, 16M
	 */
	while (!(pgsize & 1)) {
		idx++;
		pgsize >>= 4;
	}
	return idx;
}
static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
		return EHCA_MR_PGSIZE16M;
	return EHCA_MR_PGSIZE4K;
}

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
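/*
 * DMA MRs are derived from the internal max-MR: ehca_get_dma_mr()
 * allocates a fresh ehca_mr and registers it via ehca_reg_maxmr()
 * against shca->maxmr, so it can only succeed if the internal max-MR
 * was set up at device initialization (see ehca_reg_internal_maxmr()).
 */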
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exists!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;
		u64 hw_pgsize;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		/* for kernel space, use the largest possible page size */
		hw_pgsize = ehca_get_max_hwpage_size(shca);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
					 hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
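/*
 * Hardware page size selection for user MRs: with ehca_mr_largepage
 * enabled and 16M capability present, the smallest hw page size of
 * 4K/64K/1M/16M that covers the whole region is chosen; e.g. a 200K
 * region gets hwpage_size = 1M. If the kernel pages backing a large
 * hw page turn out not to be contiguous, registration is retried with
 * hwpage_size = PAGE_SIZE (see the reg_user_mr_fallback label).
 */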
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}
	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}
	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	if (ehca_mr_largepage &&
	    (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
		if (length <= EHCA_MR_PGSIZE4K
		    && PAGE_SIZE == EHCA_MR_PGSIZE4K)
			hwpage_size = EHCA_MR_PGSIZE4K;
		else if (length <= EHCA_MR_PGSIZE64K)
			hwpage_size = EHCA_MR_PGSIZE64K;
		else if (length <= EHCA_MR_PGSIZE1M)
			hwpage_size = EHCA_MR_PGSIZE1M;
		else
			hwpage_size = EHCA_MR_PGSIZE16M;
	} else
		hwpage_size = EHCA_MR_PGSIZE4K;
	ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);

reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%lx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * the kernel pages backing one hw page are not contiguous;
		 * fall back to the kernel page size
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/
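/*
 * Rereg of a physical MR always requires IB_MR_REREG_TRANS here,
 * because the PHYP rereg hCall must be given the page list; changing
 * only the PD or the access rights is rejected with -EINVAL.
 */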
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
					 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/
int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/
int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported yet */
	ehca_gen_err("bind MW currently not supported by HCAD");
	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/
int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/
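/*
 * FMR setup: the requested page_shift must match the largest hw page
 * size the HCA supports (4K or 16M, see ehca_get_max_hwpage_size()),
 * and the MR is registered with pginfo.num_hwpages == 0 so that no
 * rpages are passed yet; the actual pages arrive later via
 * ehca_map_phys_fmr().
 */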
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	if ((1 << fmr_attr->page_shift) != hw_pgsize) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	/*
	 * pginfo.num_hwpages==0, i.e. register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/
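/*
 * Example of the mapping arithmetic below: with fmr_page_size = 16M,
 * hwpage_size = 16M and list_len = 4, num_hwpages = 4 * 16M / 16M = 4.
 * iova must be a multiple of fmr_page_size, otherwise -EINVAL.
 */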
int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole FMR pages: iova must be fmr_page_size aligned */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size - 1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/
int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/
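/*
 * ehca_reg_mr() is a two-step operation: hipz_h_alloc_resource_mr()
 * creates the MR resource in firmware, then ehca_reg_mr_rpages() feeds
 * it the page list. If the second step fails, the resource is freed
 * again so no half-registered MR is left behind.
 */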
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/
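/*
 * Pages are passed to firmware in batches of MAX_RPAGES (512).
 * Example: 1300 hw pages are registered in three hCalls with 512, 512
 * and 276 pages; every call but the last must return H_PAGE_REGISTERED,
 * the final one H_SUCCESS.
 */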
int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;	/* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%x rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/
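/*
 * Reregistration strategy: the one-shot rereg hCall (Rereg1) is used
 * unless either page count exceeds MAX_RPAGES or the new mapping needs
 * more hw pages than the old one; in those cases, and when Rereg1
 * fails with -EAGAIN, the MR is rebuilt with three hCalls (Rereg3:
 * free, alloc, register rpages).
 */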
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
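/*
 * Unmapping a single FMR: for small FMRs (fmr_max_pages <= MAX_RPAGES)
 * a rereg hCall with len=0 simply detaches the pages; if that fails, or
 * for larger FMRs, the MR is freed and re-registered empty via
 * ehca_reg_mr() with num_hwpages == 0.
 */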
int ehca_unmap_one_fmr(struct ehca_shca *shca,
                       struct ehca_mr *e_fmr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_pd *e_pd =
                container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
        struct ehca_mr save_fmr;
        u32 tmp_lkey, tmp_rkey;
        struct ehca_mr_pginfo pginfo;
        struct ehca_mr_hipzout_parms hipzout;

        if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
                /*
                 * note: after using rereg hcall with len=0,
                 * rereg hcall must be used again for registering pages
                 */
                h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
                                              0, 0, e_pd->fw_pd, 0, &hipzout);
                if (h_ret == H_SUCCESS) {
                        /* successful reregistration */
                        e_fmr->start = NULL;
                        e_fmr->size = 0;
                        tmp_lkey = hipzout.lkey;
                        tmp_rkey = hipzout.rkey;
                        return 0;
                }
                /*
                 * should not happen, because length checked above,
                 * FMRs are not shared and no MW bound to FMRs
                 */
                ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
                         "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
                         "mr_hndl=%lx lkey=%x lkey_out=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle,
                         e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
                /* try free and rereg */
        }

        /* first free old FMR */
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_free_mr failed, "
                         "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
                         "lkey=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle,
                         e_fmr->ib.ib_fmr.lkey);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_unmap_one_fmr_exit0;
        }

        /* clean ehca_mr_t, without changing lock */
        save_fmr = *e_fmr;
        ehca_mr_deletenew(e_fmr);

        /* set some MR values */
        e_fmr->flags = save_fmr.flags;
        e_fmr->hwpage_size = save_fmr.hwpage_size;
        e_fmr->fmr_page_size = save_fmr.fmr_page_size;
        e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
        e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
        e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
        e_fmr->acl = save_fmr.acl;

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_FMR;
        ret = ehca_reg_mr(shca, e_fmr, NULL,
                          (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
                          e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
                          &tmp_rkey);
        if (ret) {
                u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
                /* restore the saved FMR fields */
                memcpy(&e_fmr->flags, &(save_fmr.flags),
                       sizeof(struct ehca_mr) - offset);
        }

ehca_unmap_one_fmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
                         "fmr_max_pages=%x",
                         ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
        return ret;
} /* end ehca_unmap_one_fmr() */
/*----------------------------------------------------------------------*/
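/*
 * Registers a shared MR (SMR) that aliases e_origmr at the new iova_start
 * with the given access rights; lkey/rkey are returned to the caller.
 */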
int ehca_reg_smr(struct ehca_shca *shca,
                 struct ehca_mr *e_origmr,
                 struct ehca_mr *e_newmr,
                 u64 *iova_start,
                 int acl,
                 struct ehca_pd *e_pd,
                 u32 *lkey, /*OUT*/
                 u32 *rkey) /*OUT*/
{
        int ret = 0;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
                         "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
                         "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
                         shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_reg_smr_exit0;
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_smr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
                         "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
                         ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
        return ret;
} /* end ehca_reg_smr() */
/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
        struct ehca_shca *shca,
        struct ehca_pd *e_pd,
        struct ehca_mr **e_maxmr)  /*OUT*/
{
        int ret;
        struct ehca_mr *e_mr;
        u64 *iova_start;
        u64 size_maxmr;
        struct ehca_mr_pginfo pginfo;
        struct ib_phys_buf ib_pbuf;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hw_pgsize;

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(&shca->ib_device, "out of memory");
                ret = -ENOMEM;
                goto ehca_reg_internal_maxmr_exit0;
        }
        e_mr->flags |= EHCA_MR_FLAG_MAXMR;

        /* register internal max-MR on HCA */
        size_maxmr = (u64)high_memory - PAGE_OFFSET;
        iova_start = (u64 *)KERNELBASE;
        ib_pbuf.addr = 0;
        ib_pbuf.size = size_maxmr;
        num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
                                PAGE_SIZE);
        hw_pgsize = ehca_get_max_hwpage_size(shca);
        num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
                                 hw_pgsize);

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_PHYS;
        pginfo.num_kpages = num_kpages;
        pginfo.num_hwpages = num_hwpages;
        pginfo.hwpage_size = hw_pgsize;
        pginfo.u.phy.num_phys_buf = 1;
        pginfo.u.phy.phys_buf_array = &ib_pbuf;

        ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
                          &pginfo, &e_mr->ib.ib_mr.lkey,
                          &e_mr->ib.ib_mr.rkey);
        if (ret) {
                ehca_err(&shca->ib_device, "reg of internal max MR failed, "
                         "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
                         "num_hwpages=%x", e_mr, iova_start, size_maxmr,
                         num_kpages, num_hwpages);
                goto ehca_reg_internal_maxmr_exit1;
        }

        /* successful registration of all pages */
        e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
        e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
        e_mr->ib.ib_mr.uobject = NULL;
        atomic_inc(&(e_pd->ib_pd.usecnt));
        atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
        *e_maxmr = e_mr;
        return 0;

ehca_reg_internal_maxmr_exit1:
        ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
                         ret, shca, e_pd, e_maxmr);
        return ret;
} /* end ehca_reg_internal_maxmr() */
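/*
 * Illustration of the page-count arithmetic above (example values only):
 * with PAGE_SIZE = 4 KiB, a region of 10 KiB whose iova_start lies 1 KiB
 * into a page spans NUM_CHUNKS(1K + 10K, 4K) = 3 kernel pages, since the
 * leading and trailing partial pages both count.
 */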
/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
                   struct ehca_mr *e_newmr,
                   u64 *iova_start,
                   int acl,
                   struct ehca_pd *e_pd,
                   u32 *lkey,
                   u32 *rkey)
{
        u64 h_ret;
        struct ehca_mr *e_origmr = shca->maxmr;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
                         "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, e_origmr, shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                return ehca2ib_return_code(h_ret);
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;
} /* end ehca_reg_maxmr() */
/*----------------------------------------------------------------------*/
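/*
 * Deregisters the internal max-MR and clears the SHCA's reference to it;
 * on failure the reference is restored so the SHCA state stays consistent.
 */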
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
        int ret;
        struct ehca_mr *e_maxmr;
        struct ib_pd *ib_pd;

        if (!shca->maxmr) {
                ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
                ret = -EINVAL;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        e_maxmr = shca->maxmr;
        ib_pd = e_maxmr->ib.ib_mr.pd;
        shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

        ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
        if (ret) {
                ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
                         "ret=%x e_maxmr=%p shca=%p lkey=%x",
                         ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
                shca->maxmr = e_maxmr;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
                         ret, shca, shca->maxmr);
        return ret;
} /* end ehca_dereg_internal_maxmr() */
/*----------------------------------------------------------------------*/

/*
 * checks the physical buffer array of the MR verb for validity and
 * calculates the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
                                  int num_phys_buf,
                                  u64 *iova_start,
                                  u64 *size)
{
        struct ib_phys_buf *pbuf = phys_buf_array;
        u64 size_count = 0;
        u32 i;

        if (num_phys_buf == 0) {
                ehca_gen_err("bad phys buf array len, num_phys_buf=0");
                return -EINVAL;
        }
        /* check first buffer */
        if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
                ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
                             "pbuf->addr=%lx pbuf->size=%lx",
                             iova_start, pbuf->addr, pbuf->size);
                return -EINVAL;
        }
        if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
            (num_phys_buf > 1)) {
                ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
                             "pbuf->size=%lx", pbuf->addr, pbuf->size);
                return -EINVAL;
        }

        for (i = 0; i < num_phys_buf; i++) {
                if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
                        ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
                                     "pbuf->size=%lx",
                                     i, pbuf->addr, pbuf->size);
                        return -EINVAL;
                }
                if (((i > 0) &&                  /* not 1st */
                     (i < (num_phys_buf - 1)) && /* not last */
                     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
                        ehca_gen_err("bad size, i=%x pbuf->size=%lx",
                                     i, pbuf->size);
                        return -EINVAL;
                }
                size_count += pbuf->size;
                pbuf++;
        }

        *size = size_count;
        return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
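/*
 * For illustration (example values, not from the driver): with 4 KiB
 * pages, a valid three-entry phys_buf_array could be {addr=0x1800,
 * size=0x800}, {addr=0x2000, size=0x1000}, {addr=0x3000, size=0x400};
 * only the first buffer may start, and only the last may end, off a
 * page boundary.
 */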
/*----------------------------------------------------------------------*/

/* checks the page list of the map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
                             u64 *page_list,
                             int list_len)
{
        u32 i;
        u64 *page;

        if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
                ehca_gen_err("bad list_len, list_len=%x "
                             "e_fmr->fmr_max_pages=%x fmr=%p",
                             list_len, e_fmr->fmr_max_pages, e_fmr);
                return -EINVAL;
        }

        /* each page must be aligned */
        page = page_list;
        for (i = 0; i < list_len; i++) {
                if (*page % e_fmr->fmr_page_size) {
                        ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
                                     "fmr_page_size=%x", i, *page, page, e_fmr,
                                     e_fmr->fmr_page_size);
                        return -EINVAL;
                }
                page++;
        }

        return 0;
} /* end ehca_fmr_check_page_list() */
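/*
 * Illustrative check (example values): for an FMR with a fmr_page_size of
 * 4 KiB, a page_list entry of 0x10000 passes, while 0x10800 fails the
 * alignment test above and the whole map request is rejected.
 */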
/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
                                  u32 number,
                                  u64 *kpage)
{
        int ret = 0;
        struct ib_umem_chunk *prev_chunk;
        struct ib_umem_chunk *chunk;
        u64 pgaddr;
        u32 i = 0;
        u32 j = 0;
        int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

        /* loop over desired chunk entries */
        chunk      = pginfo->u.usr.next_chunk;
        prev_chunk = pginfo->u.usr.next_chunk;
        list_for_each_entry_continue(
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
                        pgaddr = page_to_pfn(chunk->page_list[i].page)
                                << PAGE_SHIFT;
                        *kpage = phys_to_abs(pgaddr +
                                             (pginfo->next_hwpage *
                                              pginfo->hwpage_size));
                        if (!(*kpage)) {
                                ehca_gen_err("pgaddr=%lx "
                                             "chunk->page_list[i]=%lx "
                                             "i=%x next_hwpage=%lx",
                                             pgaddr, (u64)sg_dma_address(
                                                     &chunk->page_list[i]),
                                             i, pginfo->next_hwpage);
                                return -EFAULT;
                        }
                        (pginfo->hwpage_cnt)++;
                        (pginfo->next_hwpage)++;
                        kpage++;
                        if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
                                (pginfo->kpage_cnt)++;
                                (pginfo->u.usr.next_nmap)++;
                                pginfo->next_hwpage = 0;
                                i++;
                        }
                        j++;
                        if (j >= number)
                                break;
                }
                if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
                    (j >= number)) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                        break;
                } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                } else if (j >= number)
                        break;
                else
                        prev_chunk = chunk;
        }
        pginfo->u.usr.next_chunk =
                list_prepare_entry(prev_chunk,
                                   (&(pginfo->u.usr.region->chunk_list)),
                                   list);
        return ret;
}
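/*
 * Worked example for the user1 path (example sizes): with 64 KiB kernel
 * pages and a 4 KiB hwpage_size, hwpages_per_kpage is 16, so each kernel
 * page in the umem chunk yields 16 consecutive hwpage addresses before
 * next_nmap advances to the following page.
 */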
/*
 * Checks the given pages for contiguous layout; the last page's address
 * is returned in prev_pgaddr for the next round of checking.
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
                                     int start_idx, int end_idx,
                                     u64 *prev_pgaddr)
{
        int t;

        for (t = start_idx; t <= end_idx; t++) {
                u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
                ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
                             *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
                if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
                        ehca_gen_err("uncontiguous page found pgaddr=%lx "
                                     "prev_pgaddr=%lx page_list_i=%x",
                                     pgaddr, *prev_pgaddr, t);
                        return -EINVAL;
                }
                *prev_pgaddr = pgaddr;
        }
        return 0;
}
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                                  u32 number,
                                  u64 *kpage)
{
        int ret = 0;
        struct ib_umem_chunk *prev_chunk;
        struct ib_umem_chunk *chunk;
        u64 pgaddr, prev_pgaddr;
        u32 i = 0;
        u32 j = 0;
        int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
        int nr_kpages = kpages_per_hwpage;

        /* loop over desired chunk entries */
        chunk      = pginfo->u.usr.next_chunk;
        prev_chunk = pginfo->u.usr.next_chunk;
        list_for_each_entry_continue(
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
                        if (nr_kpages == kpages_per_hwpage) {
                                pgaddr = (page_to_pfn(chunk->page_list[i].page)
                                          << PAGE_SHIFT);
                                *kpage = phys_to_abs(pgaddr);
                                if (!(*kpage)) {
                                        ehca_gen_err("pgaddr=%lx i=%x",
                                                     pgaddr, i);
                                        ret = -EFAULT;
                                        return ret;
                                }
                                /*
                                 * The first page in a hwpage must be aligned;
                                 * the first MR page is exempt from this rule.
                                 */
                                if (pgaddr & (pginfo->hwpage_size - 1)) {
                                        if (pginfo->hwpage_cnt) {
                                                ehca_gen_err(
                                                        "invalid alignment "
                                                        "pgaddr=%lx i=%x "
                                                        "mr_pgsize=%lx",
                                                        pgaddr, i,
                                                        pginfo->hwpage_size);
                                                ret = -EFAULT;
                                                return ret;
                                        }
                                        /* first MR page */
                                        pginfo->kpage_cnt =
                                                (pgaddr &
                                                 (pginfo->hwpage_size - 1)) >>
                                                PAGE_SHIFT;
                                        nr_kpages -= pginfo->kpage_cnt;
                                        *kpage = phys_to_abs(
                                                pgaddr &
                                                ~(pginfo->hwpage_size - 1));
                                }
                                ehca_gen_dbg("kpage=%lx chunk_page=%lx "
                                             "value=%016lx", *kpage, pgaddr,
                                             *(u64 *)abs_to_virt(
                                                     phys_to_abs(pgaddr)));
                                prev_pgaddr = pgaddr;
                                i++;
                                pginfo->kpage_cnt++;
                                pginfo->u.usr.next_nmap++;
                                nr_kpages--;
                                if (!nr_kpages)
                                        goto next_kpage;
                                continue;
                        }
                        if (i + nr_kpages > chunk->nmap) {
                                ret = ehca_check_kpages_per_ate(
                                        chunk->page_list, i,
                                        chunk->nmap - 1, &prev_pgaddr);
                                if (ret)
                                        return ret;
                                pginfo->kpage_cnt += chunk->nmap - i;
                                pginfo->u.usr.next_nmap += chunk->nmap - i;
                                nr_kpages -= chunk->nmap - i;
                                break;
                        }

                        ret = ehca_check_kpages_per_ate(chunk->page_list, i,
                                                        i + nr_kpages - 1,
                                                        &prev_pgaddr);
                        if (ret)
                                return ret;
                        i += nr_kpages;
                        pginfo->kpage_cnt += nr_kpages;
                        pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
                        nr_kpages = kpages_per_hwpage;
                        (pginfo->hwpage_cnt)++;
                        kpage++;
                        j++;
                        if (j >= number)
                                break;
                }
                if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
                    (j >= number)) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                        break;
                } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                } else if (j >= number)
                        break;
                else
                        prev_chunk = chunk;
        }
        pginfo->u.usr.next_chunk =
                list_prepare_entry(prev_chunk,
                                   (&(pginfo->u.usr.region->chunk_list)),
                                   list);
        return ret;
}
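/*
 * Worked example for the user2 path (example sizes): with 4 KiB kernel
 * pages and a 64 KiB hwpage_size, kpages_per_hwpage is 16; each emitted
 * hwpage address must be backed by 16 physically contiguous kernel pages,
 * which ehca_check_kpages_per_ate() verifies above.
 */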
int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
                          u32 number,
                          u64 *kpage)
{
        int ret = 0;
        struct ib_phys_buf *pbuf;
        u64 num_hw, offs_hw;
        u32 i = 0;

        /* loop over desired phys_buf_array entries */
        while (i < number) {
                pbuf    = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
                num_hw  = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
                                     pbuf->size, pginfo->hwpage_size);
                offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
                        pginfo->hwpage_size;
                while (pginfo->next_hwpage < offs_hw + num_hw) {
                        /* sanity check */
                        if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
                            (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
                                ehca_gen_err("kpage_cnt >= num_kpages, "
                                             "kpage_cnt=%lx num_kpages=%lx "
                                             "hwpage_cnt=%lx "
                                             "num_hwpages=%lx i=%x",
                                             pginfo->kpage_cnt,
                                             pginfo->num_kpages,
                                             pginfo->hwpage_cnt,
                                             pginfo->num_hwpages, i);
                                return -EFAULT;
                        }
                        *kpage = phys_to_abs(
                                (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
                                (pginfo->next_hwpage * pginfo->hwpage_size));
                        if (!(*kpage) && pbuf->addr) {
                                ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
                                             "next_hwpage=%lx", pbuf->addr,
                                             pbuf->size, pginfo->next_hwpage);
                                return -EFAULT;
                        }
                        (pginfo->hwpage_cnt)++;
                        (pginfo->next_hwpage)++;
                        if (PAGE_SIZE >= pginfo->hwpage_size) {
                                if (pginfo->next_hwpage %
                                    (PAGE_SIZE / pginfo->hwpage_size) == 0)
                                        (pginfo->kpage_cnt)++;
                        } else
                                pginfo->kpage_cnt += pginfo->hwpage_size /
                                        PAGE_SIZE;
                        kpage++;
                        i++;
                        if (i >= number)
                                break;
                }
                if (pginfo->next_hwpage >= offs_hw + num_hw) {
                        (pginfo->u.phy.next_buf)++;
                        pginfo->next_hwpage = 0;
                }
        }
        return ret;
}
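/*
 * Sketch of the bookkeeping above (example sizes): a page-aligned 12 KiB
 * phys buffer with a 4 KiB hwpage_size covers num_hw = 3 hwpages; once
 * next_hwpage reaches offs_hw + num_hw, the walk advances to the next
 * phys buffer and resets next_hwpage to 0.
 */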
int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
                         u32 number,
                         u64 *kpage)
{
        int ret = 0;
        u64 *fmrlist;
        u32 i;

        /* loop over desired page_list entries */
        fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
        for (i = 0; i < number; i++) {
                *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
                                     pginfo->next_hwpage *
                                     pginfo->hwpage_size);
                if (!(*kpage)) {
                        ehca_gen_err("*fmrlist=%lx fmrlist=%p "
                                     "next_listelem=%lx next_hwpage=%lx",
                                     *fmrlist, fmrlist,
                                     pginfo->u.fmr.next_listelem,
                                     pginfo->next_hwpage);
                        return -EFAULT;
                }
                (pginfo->hwpage_cnt)++;
                if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
                        if (pginfo->next_hwpage %
                            (pginfo->u.fmr.fmr_pgsize /
                             pginfo->hwpage_size) == 0) {
                                (pginfo->kpage_cnt)++;
                                (pginfo->u.fmr.next_listelem)++;
                                fmrlist++;
                                pginfo->next_hwpage = 0;
                        } else
                                (pginfo->next_hwpage)++;
                } else {
                        unsigned int cnt_per_hwpage = pginfo->hwpage_size /
                                pginfo->u.fmr.fmr_pgsize;
                        unsigned int j;
                        u64 prev = *kpage;

                        /* check if adrs are contiguous */
                        for (j = 1; j < cnt_per_hwpage; j++) {
                                u64 p = phys_to_abs(fmrlist[j] &
                                                    ~(pginfo->hwpage_size - 1));
                                if (prev + pginfo->u.fmr.fmr_pgsize != p) {
                                        ehca_gen_err("uncontiguous fmr pages "
                                                     "found prev=%lx p=%lx "
                                                     "idx=%x", prev, p, i + j);
                                        return -EINVAL;
                                }
                                prev = p;
                        }
                        pginfo->kpage_cnt += cnt_per_hwpage;
                        pginfo->u.fmr.next_listelem += cnt_per_hwpage;
                        fmrlist += cnt_per_hwpage;
                }
                kpage++;
        }
        return ret;
}
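/*
 * Example for the FMR path (example sizes): with fmr_pgsize = 64 KiB and
 * hwpage_size = 4 KiB, one page_list entry is meant to supply 16 hwpages
 * before next_listelem advances; in the opposite case the fmr pages that
 * back a single hwpage must be contiguous, as checked above.
 */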
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
                     u32 number,
                     u64 *kpage)
{
        int ret;

        switch (pginfo->type) {
        case EHCA_MR_PGI_PHYS:
                ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
                break;
        case EHCA_MR_PGI_USER:
                ret = PAGE_SIZE >= pginfo->hwpage_size ?
                        ehca_set_pagebuf_user1(pginfo, number, kpage) :
                        ehca_set_pagebuf_user2(pginfo, number, kpage);
                break;
        case EHCA_MR_PGI_FMR:
                ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
                break;
        default:
                ehca_gen_err("bad pginfo->type=%x", pginfo->type);
                ret = -EFAULT;
                break;
        }
        return ret;
} /* end ehca_set_pagebuf() */
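/*
 * Typical use (a sketch, assuming a caller such as ehca_reg_mr_rpages):
 * the caller repeatedly fills a kpage buffer and hands it to firmware
 * until all pages are consumed, e.g.:
 *
 *      while (pginfo->hwpage_cnt < pginfo->num_hwpages) {
 *              ret = ehca_set_pagebuf(pginfo, rnum, kpage);
 *              if (ret)
 *                      break;
 *              ... pass the kpage array to the register-rpage hcall ...
 *      }
 */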
/*----------------------------------------------------------------------*/
/*
 * checks whether an MR covers the whole of memory, i.e. is a max-MR;
 * returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
                     u64 *iova_start)
{
        /* an MR is treated as a max-MR only if it matches the following: */
        if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
            (iova_start == (void *)KERNELBASE)) {
                ehca_gen_dbg("this is a max-MR");
                return 1;
        } else
                return 0;
} /* end ehca_mr_is_maxmr() */
/*----------------------------------------------------------------------*/

/* maps IB access control bits to hipz access control; used for MR and MW */
void ehca_mrmw_map_acl(int ib_acl,
                       u32 *hipz_acl)
{
        *hipz_acl = 0;
        if (ib_acl & IB_ACCESS_REMOTE_READ)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
        if (ib_acl & IB_ACCESS_REMOTE_WRITE)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
        if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
        if (ib_acl & IB_ACCESS_LOCAL_WRITE)
                *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
        if (ib_acl & IB_ACCESS_MW_BIND)
                *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */
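/*
 * Typical use, as in ehca_reg_smr() above:
 *
 *      ehca_mrmw_map_acl(acl, &hipz_acl);
 *      ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
 */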
/*----------------------------------------------------------------------*/

/* sets the hw page size in the hipz access control for MR/MW */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
        *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */
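/*
 * Note: the encoded page size lands in bits 24 and up of the access
 * control word, alongside the HIPZ_ACCESSCTRL_* bits set above.
 */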
/*----------------------------------------------------------------------*/

/*
 * reverse-maps hipz access control bits to IB access control;
 * used for MR and MW
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
                               int *ib_acl) /*OUT*/
{
        *ib_acl = 0;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
                *ib_acl |= IB_ACCESS_REMOTE_READ;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
                *ib_acl |= IB_ACCESS_REMOTE_WRITE;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
                *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
        if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
                *ib_acl |= IB_ACCESS_LOCAL_WRITE;
        if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
                *ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */
/*----------------------------------------------------------------------*/

/*
 * MR "destructor and constructor": used by the reregister MR verb; sets
 * all fields of the ehca_mr to 0 except struct ib_mr and the spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
        mr->flags = 0;
        mr->num_kpages = 0;
        mr->num_hwpages = 0;
        mr->acl = 0;
        mr->start = NULL;
        mr->fmr_page_size = 0;
        mr->fmr_max_pages = 0;
        mr->fmr_max_maps = 0;
        mr->fmr_map_cnt = 0;
        memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
        memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

/*----------------------------------------------------------------------*/
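/*
 * Creates the slab caches for MR and MW objects; if the second cache
 * cannot be created, the first is torn down again so that no partial
 * state remains.
 */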
int ehca_init_mrmw_cache(void)
{
        mr_cache = kmem_cache_create("ehca_cache_mr",
                                     sizeof(struct ehca_mr), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL);
        if (!mr_cache)
                return -ENOMEM;
        mw_cache = kmem_cache_create("ehca_cache_mw",
                                     sizeof(struct ehca_mw), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL);
        if (!mw_cache) {
                kmem_cache_destroy(mr_cache);
                mr_cache = NULL;
                return -ENOMEM;
        }
        return 0;
}
void ehca_cleanup_mrmw_cache(void)
{
        if (mr_cache)
                kmem_cache_destroy(mr_cache);
        if (mw_cache)
                kmem_cache_destroy(mw_cache);
}