ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
        (((length) + (chunk_size - 1)) / (chunk_size))
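/*
 * NUM_CHUNKS rounds up to whole chunks, e.g.
 * NUM_CHUNKS(0x11000, 0x10000) = (0x11000 + 0xffff) / 0x10000 = 2:
 * a 68K range needs two 64K chunks.
 */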
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

/* DMEM toleration management */
#define EHCA_SECTSHIFT         SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13                /* 8k Entries in 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)            /* currently fixed map size */
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
static unsigned long ehca_mr_len;

/*
 * Memory map data structures
 */
struct ehca_dir_bmap {
        u64 ent[EHCA_MAP_ENTRIES];
};
struct ehca_top_bmap {
        struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
};
struct ehca_bmap {
        struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
};
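/*
 * Sketch of the intended lookup (assuming the usual three-level radix
 * decomposition): a section number (addr >> EHCA_SECTSHIFT) is split into
 * 13-bit top/dir/ent indices, each masked with EHCA_INDEX_MASK, so one
 * ehca_bmap covers 3 * 13 = 39 bits worth of memory sections.
 */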
static struct ehca_bmap *ehca_bmap;

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
        EHCA_MR_PGSIZE4K  = 0x1000L,
        EHCA_MR_PGSIZE64K = 0x10000L,
        EHCA_MR_PGSIZE1M  = 0x100000L,
        EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24
static u64 ehca_map_vaddr(void *caddr);

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
        int log = ilog2(pgsize);
        WARN_ON(log < 12 || log > 24 || log & 3);
        return (log - 12) / 4;
}
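/*
 * The selector computed above maps the supported page sizes as
 * 4K (log 12) -> 0, 64K (log 16) -> 1, 1M (log 20) -> 2 and
 * 16M (log 24) -> 3; it is passed to the rpage register hcall below.
 */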
static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
        return 1UL << ilog2(shca->hca_cap_mr_pgsize);
}

static struct ehca_mr *ehca_mr_new(void)
{
        struct ehca_mr *me;

        me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
        if (me)
                spin_lock_init(&me->mrlock);
        else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
        kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
        struct ehca_mw *me;

        me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
        if (me)
                spin_lock_init(&me->mwlock);
        else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
        kmem_cache_free(mw_cache, me);
}
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_maxmr;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);

        if (shca->maxmr) {
                e_maxmr = ehca_mr_new();
                if (!e_maxmr) {
                        ehca_err(&shca->ib_device, "out of memory");
                        ib_mr = ERR_PTR(-ENOMEM);
                        goto get_dma_mr_exit0;
                }

                ret = ehca_reg_maxmr(shca, e_maxmr,
                                     (void *)ehca_map_vaddr((void *)KERNELBASE),
                                     mr_access_flags, e_pd,
                                     &e_maxmr->ib.ib_mr.lkey,
                                     &e_maxmr->ib.ib_mr.rkey);
                if (ret) {
                        ehca_mr_delete(e_maxmr);
                        ib_mr = ERR_PTR(ret);
                        goto get_dma_mr_exit0;
                }
                ib_mr = &e_maxmr->ib.ib_mr;
        } else {
                ehca_err(&shca->ib_device, "no internal max-MR exists!");
                ib_mr = ERR_PTR(-EINVAL);
                goto get_dma_mr_exit0;
        }

get_dma_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
                         PTR_ERR(ib_mr), pd, mr_access_flags);
        return ib_mr;
} /* end ehca_get_dma_mr() */
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
                               struct ib_phys_buf *phys_buf_array,
                               int num_phys_buf,
                               int mr_access_flags,
                               u64 *iova_start)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        u64 size;

        if ((num_phys_buf <= 0) || !phys_buf_array) {
                ehca_err(pd->device, "bad input values: num_phys_buf=%x "
                         "phys_buf_array=%p", num_phys_buf, phys_buf_array);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        /* check physical buffer list and calculate size */
        ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
                                            iova_start, &size);
        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_phys_mr_exit0;
        }
        if ((size == 0) ||
            (((u64)iova_start + size) < (u64)iova_start)) {
                ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
                         size, iova_start);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_phys_mr_exit0;
        }

        /* register MR on HCA */
        if (ehca_mr_is_maxmr(size, iova_start)) {
                e_mr->flags |= EHCA_MR_FLAG_MAXMR;
                ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
                                     e_pd, &e_mr->ib.ib_mr.lkey,
                                     &e_mr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        } else {
                struct ehca_mr_pginfo pginfo;
                u32 num_kpages;
                u32 num_hwpages;
                u64 hw_pgsize;

                num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
                                        PAGE_SIZE);
                /* for kernel space we try the largest possible pgsize */
                hw_pgsize = ehca_get_max_hwpage_size(shca);
                num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
                                         hw_pgsize);
                memset(&pginfo, 0, sizeof(pginfo));
                pginfo.type = EHCA_MR_PGI_PHYS;
                pginfo.num_kpages = num_kpages;
                pginfo.hwpage_size = hw_pgsize;
                pginfo.num_hwpages = num_hwpages;
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
                        ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;

                ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
                                  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
                                  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
        ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
                         "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
                         PTR_ERR(ib_mr), pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ib_mr;
} /* end ehca_reg_phys_mr() */
/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int mr_access_flags,
                               struct ib_udata *udata)
{
        struct ib_mr *ib_mr;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo;
        int ret, page_shift;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hwpage_size;

        if (!pd) {
                ehca_gen_err("bad pd=%p", pd);
                return ERR_PTR(-EFAULT);
        }

        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        if (length == 0 || virt + length < virt) {
                ehca_err(pd->device, "bad input values: length=%llx "
                         "virt_base=%llx", length, virt);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_user_mr_exit0;
        }

        e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
                                 mr_access_flags, 0);
        if (IS_ERR(e_mr->umem)) {
                ib_mr = (void *)e_mr->umem;
                goto reg_user_mr_exit1;
        }

        if (e_mr->umem->page_size != PAGE_SIZE) {
                ehca_err(pd->device, "page size not supported, "
                         "e_mr->umem->page_size=%x", e_mr->umem->page_size);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit2;
        }

        /* determine number of MR pages */
        num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
        /* select proper hw_pgsize */
        page_shift = PAGE_SHIFT;
        if (e_mr->umem->hugetlb) {
                /* determine page_shift, clamp between 4K and 16M */
                page_shift = (fls64(length - 1) + 3) & ~3;
                page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
                                 EHCA_MR_PGSHIFT16M);
        }
        hwpage_size = 1UL << page_shift;

        /*
         * now that we have the desired page size, shift until it's
         * supported, too. 4K is always supported, so this terminates.
         */
        while (!(hwpage_size & shca->hca_cap_mr_pgsize))
                hwpage_size >>= 4;
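        /*
         * e.g. a 16M request on an HCA that only supports 4K and 64K
         * falls back 16M -> 1M -> 64K: each shift divides the size by 16,
         * matching the spacing of the EHCA_MR_PGSIZE* values.
         */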
reg_user_mr_fallback:
        num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
        /* register MR on HCA */
        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_USER;
        pginfo.hwpage_size = hwpage_size;
        pginfo.num_kpages = num_kpages;
        pginfo.num_hwpages = num_hwpages;
        pginfo.u.usr.region = e_mr->umem;
        pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
        pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
                                                     (&e_mr->umem->chunk_list),
                                                     list);

        ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
                          e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
                          &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
        if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
                ehca_warn(pd->device, "failed to register mr "
                          "with hwpage_size=%llx", hwpage_size);
                ehca_info(pd->device, "try to register mr with "
                          "kpage_size=%lx", PAGE_SIZE);
                /*
                 * this means kpages are not contiguous for a hw page
                 * try kernel page size as fallback solution
                 */
                hwpage_size = PAGE_SIZE;
                goto reg_user_mr_fallback;
        }

        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_user_mr_exit2;
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
        ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
        ehca_mr_delete(e_mr);
reg_user_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
                         PTR_ERR(ib_mr), pd, mr_access_flags, udata);
        return ib_mr;
} /* end ehca_reg_user_mr() */
/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
                       int mr_rereg_mask,
                       struct ib_pd *pd,
                       struct ib_phys_buf *phys_buf_array,
                       int num_phys_buf,
                       int mr_access_flags,
                       u64 *iova_start)
{
        int ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        u64 new_size;
        u64 *new_start;
        u32 new_acl;
        struct ehca_pd *new_pd;
        u32 tmp_lkey, tmp_rkey;
        unsigned long sl_flags;
        u32 num_kpages = 0;
        u32 num_hwpages = 0;
        struct ehca_mr_pginfo pginfo;

        if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
                /* TODO not supported, because PHYP rereg hCall needs pages */
                ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
                         "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (mr_rereg_mask & IB_MR_REREG_PD) {
                if (!pd) {
                        ehca_err(mr->device, "rereg with bad pd, pd=%p "
                                 "mr_rereg_mask=%x", pd, mr_rereg_mask);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }

        if ((mr_rereg_mask &
             ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
            (mr_rereg_mask == 0)) {
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* check other parameters */
        if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
                if (e_mr->flags & EHCA_MR_FLAG_FMR) {
                        ehca_err(mr->device, "not supported for FMR, mr=%p "
                                 "flags=%x", mr, e_mr->flags);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
                if (!phys_buf_array || num_phys_buf <= 0) {
                        ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
                                 " phys_buf_array=%p num_phys_buf=%x",
                                 mr_rereg_mask, phys_buf_array, num_phys_buf);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }

        if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
            (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
             ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
                         "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* set requested values dependent on rereg request */
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);
        new_start = e_mr->start;
        new_size = e_mr->size;
        new_acl = e_mr->acl;
        new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

                new_start = iova_start; /* change address */
                /* check physical buffer list and calculate size */
                ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
                                                    num_phys_buf, iova_start,
                                                    &new_size);
                if (ret)
                        goto rereg_phys_mr_exit1;
                if ((new_size == 0) ||
                    (((u64)iova_start + new_size) < (u64)iova_start)) {
                        ehca_err(mr->device, "bad input values: new_size=%llx "
                                 "iova_start=%p", new_size, iova_start);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit1;
                }
                num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
                                        new_size, PAGE_SIZE);
                num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
                                         new_size, hw_pgsize);
                memset(&pginfo, 0, sizeof(pginfo));
                pginfo.type = EHCA_MR_PGI_PHYS;
                pginfo.num_kpages = num_kpages;
                pginfo.hwpage_size = hw_pgsize;
                pginfo.num_hwpages = num_hwpages;
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
                        ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
        }
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                new_acl = mr_access_flags;
        if (mr_rereg_mask & IB_MR_REREG_PD)
                new_pd = container_of(pd, struct ehca_pd, ib_pd);

        ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
                            new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto rereg_phys_mr_exit1;

        /* successful reregistration */
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mr->pd = pd;
        mr->lkey = tmp_lkey;
        mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
                         "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
                         "iova_start=%p",
                         ret, mr, mr_rereg_mask, pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ret;
} /* end ehca_rereg_phys_mr() */
/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        unsigned long sl_flags;
        struct ehca_mr_hipzout_parms hipzout;

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto query_mr_exit0;
        }

        memset(mr_attr, 0, sizeof(struct ib_mr_attr));
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);

        h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
                         "hca_hndl=%llx mr_hndl=%llx lkey=%x",
                         h_ret, mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto query_mr_exit1;
        }
        mr_attr->pd = mr->pd;
        mr_attr->device_virt_addr = hipzout.vaddr;
        mr_attr->size = hipzout.len;
        mr_attr->lkey = hipzout.lkey;
        mr_attr->rkey = hipzout.rkey;
        ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
                         ret, mr, mr_attr);
        return ret;
} /* end ehca_query_mr() */
/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        } else if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        }

        /* TODO: BUSY: MR still has bound window(s) */
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
                         "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
                         h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto dereg_mr_exit0;
        }

        if (e_mr->umem)
                ib_umem_release(e_mr->umem);

        /* successful deregistration */
        ehca_mr_delete(e_mr);

dereg_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
        return ret;
} /* end ehca_dereg_mr() */
/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *ib_mw;
        u64 h_ret;
        struct ehca_mw *e_mw;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_mw_hipzout_parms hipzout;

        e_mw = ehca_mw_new();
        if (!e_mw) {
                ib_mw = ERR_PTR(-ENOMEM);
                goto alloc_mw_exit0;
        }

        h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
                         "shca=%p hca_hndl=%llx mw=%p",
                         h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
                ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
                goto alloc_mw_exit1;
        }
        /* successful MW allocation */
        e_mw->ipz_mw_handle = hipzout.handle;
        e_mw->ib_mw.rkey = hipzout.rkey;
        return &e_mw->ib_mw;

alloc_mw_exit1:
        ehca_mw_delete(e_mw);
alloc_mw_exit0:
        if (IS_ERR(ib_mw))
                ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
        return ib_mw;
} /* end ehca_alloc_mw() */
  644. } /* end ehca_alloc_mw() */
  645. /*----------------------------------------------------------------------*/
  646. int ehca_bind_mw(struct ib_qp *qp,
  647. struct ib_mw *mw,
  648. struct ib_mw_bind *mw_bind)
  649. {
  650. /* TODO: not supported up to now */
  651. ehca_gen_err("bind MW currently not supported by HCAD");
  652. return -EPERM;
  653. } /* end ehca_bind_mw() */
  654. /*----------------------------------------------------------------------*/
  655. int ehca_dealloc_mw(struct ib_mw *mw)
  656. {
  657. u64 h_ret;
  658. struct ehca_shca *shca =
  659. container_of(mw->device, struct ehca_shca, ib_device);
  660. struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
  661. h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
  662. if (h_ret != H_SUCCESS) {
  663. ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
  664. "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
  665. h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
  666. e_mw->ipz_mw_handle.handle);
  667. return ehca2ib_return_code(h_ret);
  668. }
  669. /* successful deallocation */
  670. ehca_mw_delete(e_mw);
  671. return 0;
  672. } /* end ehca_dealloc_mw() */
/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
                              int mr_access_flags,
                              struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr *e_fmr;
        int ret;
        u32 tmp_lkey, tmp_rkey;
        struct ehca_mr_pginfo pginfo;
        u64 hw_pgsize;

        /* check other parameters */
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if (mr_access_flags & IB_ACCESS_MW_BIND) {
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
                ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
                         "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
                         fmr_attr->max_pages, fmr_attr->max_maps,
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }

        /* 1UL avoids undefined 32-bit shift for bogus page_shift values */
        hw_pgsize = 1UL << fmr_attr->page_shift;
        if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
                ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }

        e_fmr = ehca_mr_new();
        if (!e_fmr) {
                ib_fmr = ERR_PTR(-ENOMEM);
                goto alloc_fmr_exit0;
        }
        e_fmr->flags |= EHCA_MR_FLAG_FMR;

        /* register MR on HCA */
        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.hwpage_size = hw_pgsize;
        /*
         * pginfo.num_hwpages==0, ie register_rpages() will not be called
         * but deferred to map_phys_fmr()
         */
        ret = ehca_reg_mr(shca, e_fmr, NULL,
                          fmr_attr->max_pages * (1UL << fmr_attr->page_shift),
                          mr_access_flags, e_pd, &pginfo,
                          &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
        if (ret) {
                ib_fmr = ERR_PTR(ret);
                goto alloc_fmr_exit1;
        }

        /* successful */
        e_fmr->hwpage_size = hw_pgsize;
        e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
        e_fmr->fmr_max_pages = fmr_attr->max_pages;
        e_fmr->fmr_max_maps = fmr_attr->max_maps;
        e_fmr->fmr_map_cnt = 0;
        return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
        ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
        return ib_fmr;
} /* end ehca_alloc_fmr() */
/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
                      u64 *page_list,
                      int list_len,
                      u64 iova)
{
        int ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
        struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo;
        u32 tmp_lkey, tmp_rkey;

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
        if (ret)
                goto map_phys_fmr_exit0;
        if (iova % e_fmr->fmr_page_size) {
                /* iova must be aligned to the FMR page size */
                ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
                         iova, e_fmr->fmr_page_size);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
                /* HCAD does not limit the maps, however trace this anyway */
                ehca_info(fmr->device, "map limit exceeded, fmr=%p "
                          "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
                          fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
        }

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_FMR;
        pginfo.num_kpages = list_len;
        pginfo.hwpage_size = e_fmr->hwpage_size;
        pginfo.num_hwpages =
                list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
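        /*
         * e.g. list_len = 4 FMR pages of 64K backed by 4K hw pages
         * gives num_hwpages = 4 * 0x10000 / 0x1000 = 64.
         */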
        pginfo.u.fmr.page_list = page_list;
        pginfo.next_hwpage =
                (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
        pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

        ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
                            list_len * e_fmr->fmr_page_size,
                            e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto map_phys_fmr_exit0;

        /* successful reregistration */
        e_fmr->fmr_map_cnt++;
        e_fmr->ib.ib_fmr.lkey = tmp_lkey;
        e_fmr->ib.ib_fmr.rkey = tmp_rkey;
        return 0;

map_phys_fmr_exit0:
        if (ret)
                ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
                         "iova=%llx", ret, fmr, page_list, list_len, iova);
        return ret;
} /* end ehca_map_phys_fmr() */
/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
        int ret = 0;
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca = NULL;
        struct ehca_shca *prev_shca;
        struct ehca_mr *e_fmr;
        u32 num_fmr = 0;
        u32 unmap_fmr_cnt = 0;

        /* check that all FMRs belong to the same SHCA, and check internal flag */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                prev_shca = shca;
                if (!ib_fmr) {
                        ehca_gen_err("bad fmr=%p in list", ib_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                if ((shca != prev_shca) && prev_shca) {
                        ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
                                 "prev_shca=%p e_fmr=%p",
                                 shca, prev_shca, e_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                        ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
                                 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                num_fmr++;
        }

        /* loop over all FMRs to unmap */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                unmap_fmr_cnt++;
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                ret = ehca_unmap_one_fmr(shca, e_fmr);
                if (ret) {
                        /* unmap failed, stop unmapping of rest of FMRs */
                        ehca_err(&shca->ib_device, "unmap of one FMR failed, "
                                 "stop rest, e_fmr=%p num_fmr=%x "
                                 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
                                 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
                        goto unmap_fmr_exit0;
                }
        }

unmap_fmr_exit0:
        if (ret)
                ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
                             ret, fmr_list, num_fmr, unmap_fmr_cnt);
        return ret;
} /* end ehca_unmap_fmr() */
/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
        int ret;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto free_fmr_exit0;
        }

        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
        if (h_ret != H_SUCCESS) {
                ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
                         "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle, fmr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto free_fmr_exit0;
        }
        /* successful deregistration */
        ehca_mr_delete(e_fmr);
        return 0;

free_fmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
        return ret;
} /* end ehca_dealloc_fmr() */
/*----------------------------------------------------------------------*/

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
                                   struct ehca_mr *e_mr,
                                   struct ehca_mr_pginfo *pginfo);

int ehca_reg_mr(struct ehca_shca *shca,
                struct ehca_mr *e_mr,
                u64 *iova_start,
                u64 size,
                int acl,
                struct ehca_pd *e_pd,
                struct ehca_mr_pginfo *pginfo,
                u32 *lkey, /*OUT*/
                u32 *rkey, /*OUT*/
                enum ehca_reg_type reg_type)
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
        if (ehca_use_hp_mr == 1) /* module parameter: request high-perf MRs */
                hipz_acl |= 0x00000001;

        h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
                                         (u64)iova_start, size, hipz_acl,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
                         "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_reg_mr_exit0;
        }

        e_mr->ipz_mr_handle = hipzout.handle;

        if (reg_type == EHCA_REG_BUSMAP_MR)
                ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
        else if (reg_type == EHCA_REG_MR)
                ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
        else
                ret = -EINVAL;

        if (ret)
                goto ehca_reg_mr_exit1;

        /* successful registration */
        e_mr->num_kpages = pginfo->num_kpages;
        e_mr->num_hwpages = pginfo->num_hwpages;
        e_mr->hwpage_size = pginfo->hwpage_size;
        e_mr->start = iova_start;
        e_mr->size = size;
        e_mr->acl = acl;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_mr_exit1:
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
                         "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
                         "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
                         h_ret, shca, e_mr, iova_start, size, acl, e_pd,
                         hipzout.lkey, pginfo, pginfo->num_kpages,
                         pginfo->num_hwpages, ret);
                ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
                         "not recoverable");
        }
ehca_reg_mr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
                         "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
                         "num_kpages=%llx num_hwpages=%llx",
                         ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
                         pginfo->num_kpages, pginfo->num_hwpages);
        return ret;
} /* end ehca_reg_mr() */
/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
                       struct ehca_mr *e_mr,
                       struct ehca_mr_pginfo *pginfo)
{
        int ret = 0;
        u64 h_ret;
        u32 rnum;
        u64 rpage;
        u32 i;
        u64 *kpage;

        if (!pginfo->num_hwpages) /* in case of fmr */
                return 0;

        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_reg_mr_rpages_exit0;
        }

        /* max MAX_RPAGES ehca mr pages per register call */
        for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
                        rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
                        if (rnum == 0)
                                rnum = MAX_RPAGES;      /* last shot is full */
                } else
                        rnum = MAX_RPAGES;
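                /*
                 * e.g. num_hwpages = 1030 issues NUM_CHUNKS(1030, 512) = 3
                 * hcalls with rnum = 512, 512 and 6; for an exact multiple
                 * of MAX_RPAGES the modulo is 0 and the last shot is full.
                 */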
                ret = ehca_set_pagebuf(pginfo, rnum, kpage);
                if (ret) {
                        ehca_err(&shca->ib_device, "ehca_set_pagebuf "
                                 "bad rc, ret=%i rnum=%x kpage=%p",
                                 ret, rnum, kpage);
                        goto ehca_reg_mr_rpages_exit1;
                }

                if (rnum > 1) {
                        rpage = virt_to_abs(kpage);
                        if (!rpage) {
                                ehca_err(&shca->ib_device, "kpage=%p i=%x",
                                         kpage, i);
                                ret = -EFAULT;
                                goto ehca_reg_mr_rpages_exit1;
                        }
                } else
                        rpage = *kpage;

                h_ret = hipz_h_register_rpage_mr(
                        shca->ipz_hca_handle, e_mr,
                        ehca_encode_hwpage_size(pginfo->hwpage_size),
                        0, rpage, rnum);

                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
                        /*
                         * check for 'registration complete'==H_SUCCESS
                         * and for 'page registered'==H_PAGE_REGISTERED
                         */
                        if (h_ret != H_SUCCESS) {
                                ehca_err(&shca->ib_device, "last "
                                         "hipz_reg_rpage_mr failed, h_ret=%lli "
                                         "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
                                         " lkey=%x", h_ret, e_mr, i,
                                         shca->ipz_hca_handle.handle,
                                         e_mr->ipz_mr_handle.handle,
                                         e_mr->ib.ib_mr.lkey);
                                ret = ehca2ib_return_code(h_ret);
                                break;
                        } else
                                ret = 0;
                } else if (h_ret != H_PAGE_REGISTERED) {
                        ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
                                 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
                                 "mr_hndl=%llx", h_ret, e_mr, i,
                                 e_mr->ib.ib_mr.lkey,
                                 shca->ipz_hca_handle.handle,
                                 e_mr->ipz_mr_handle.handle);
                        ret = ehca2ib_return_code(h_ret);
                        break;
                } else
                        ret = 0;
        } /* end for(i) */

ehca_reg_mr_rpages_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
                         "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
                         pginfo, pginfo->num_kpages, pginfo->num_hwpages);
        return ret;
} /* end ehca_reg_mr_rpages() */
/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
                                struct ehca_mr *e_mr,
                                u64 *iova_start,
                                u64 size,
                                u32 acl,
                                struct ehca_pd *e_pd,
                                struct ehca_mr_pginfo *pginfo,
                                u32 *lkey, /*OUT*/
                                u32 *rkey) /*OUT*/
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        u64 *kpage;
        u64 rpage;
        struct ehca_mr_pginfo pginfo_save;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_rereg_mr_rereg1_exit0;
        }

        pginfo_save = *pginfo;
        ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
        if (ret) {
                ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
                         "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
                         "kpage=%p", e_mr, pginfo, pginfo->type,
                         pginfo->num_kpages, pginfo->num_hwpages, kpage);
                goto ehca_rereg_mr_rereg1_exit1;
        }
        rpage = virt_to_abs(kpage);
        if (!rpage) {
                ehca_err(&shca->ib_device, "kpage=%p", kpage);
                ret = -EFAULT;
                goto ehca_rereg_mr_rereg1_exit1;
        }
        h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
                                      (u64)iova_start, size, hipz_acl,
                                      e_pd->fw_pd, rpage, &hipzout);
        if (h_ret != H_SUCCESS) {
                /*
                 * reregistration unsuccessful, try it again with the 3 hCalls,
                 * e.g. this is required in case H_MR_CONDITION
                 * (MW bound or MR is shared)
                 */
                ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
                          "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
                *pginfo = pginfo_save;
                ret = -EAGAIN;
        } else if ((u64 *)hipzout.vaddr != iova_start) {
                ehca_err(&shca->ib_device, "PHYP changed iova_start in "
                         "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
                         "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
                         hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
                         e_mr->ib.ib_mr.lkey, hipzout.lkey);
                ret = -EFAULT;
        } else {
                /*
                 * successful reregistration
                 * note: start and start_out are identical for eServer HCAs
                 */
                e_mr->num_kpages = pginfo->num_kpages;
                e_mr->num_hwpages = pginfo->num_hwpages;
                e_mr->hwpage_size = pginfo->hwpage_size;
                e_mr->start = iova_start;
                e_mr->size = size;
                e_mr->acl = acl;
                *lkey = hipzout.lkey;
                *rkey = hipzout.rkey;
        }

ehca_rereg_mr_rereg1_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
        if (ret && (ret != -EAGAIN))
                ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
                         "pginfo=%p num_kpages=%llx num_hwpages=%llx",
                         ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
                         pginfo->num_hwpages);
        return ret;
} /* end ehca_rereg_mr_rereg1() */
/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
                  struct ehca_mr *e_mr,
                  u64 *iova_start,
                  u64 size,
                  int acl,
                  struct ehca_pd *e_pd,
                  struct ehca_mr_pginfo *pginfo,
                  u32 *lkey,
                  u32 *rkey)
{
        int ret = 0;
        u64 h_ret;
        int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
        int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

        /* first determine reregistration hCall(s) */
        if ((pginfo->num_hwpages > MAX_RPAGES) ||
            (e_mr->num_hwpages > MAX_RPAGES) ||
            (pginfo->num_hwpages > e_mr->num_hwpages)) {
                ehca_dbg(&shca->ib_device, "Rereg3 case, "
                         "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
                         pginfo->num_hwpages, e_mr->num_hwpages);
                rereg_1_hcall = 0;
                rereg_3_hcall = 1;
        }
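        /*
         * Rereg1 stages the whole page list in a single fw ctrlblock, so it
         * can only cover up to MAX_RPAGES hw pages; bigger MRs (or growth
         * beyond the registered size) take the slower deregister/re-register
         * path below.
         */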
        if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
                rereg_1_hcall = 0;
                rereg_3_hcall = 1;
                e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
                ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
                         e_mr);
        }

        if (rereg_1_hcall) {
                ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
                                           acl, e_pd, pginfo, lkey, rkey);
                if (ret) {
                        if (ret == -EAGAIN)
                                rereg_3_hcall = 1;
                        else
                                goto ehca_rereg_mr_exit0;
                }
        }

        if (rereg_3_hcall) {
                struct ehca_mr save_mr;

                /* first deregister old MR */
                h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
                if (h_ret != H_SUCCESS) {
                        ehca_err(&shca->ib_device, "hipz_free_mr failed, "
                                 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
                                 "mr->lkey=%x",
                                 h_ret, e_mr, shca->ipz_hca_handle.handle,
                                 e_mr->ipz_mr_handle.handle,
                                 e_mr->ib.ib_mr.lkey);
                        ret = ehca2ib_return_code(h_ret);
                        goto ehca_rereg_mr_exit0;
                }
                /* clean ehca_mr_t, without changing struct ib_mr and lock */
                save_mr = *e_mr;
                ehca_mr_deletenew(e_mr);

                /* set some MR values */
                e_mr->flags = save_mr.flags;
                e_mr->hwpage_size = save_mr.hwpage_size;
                e_mr->fmr_page_size = save_mr.fmr_page_size;
                e_mr->fmr_max_pages = save_mr.fmr_max_pages;
                e_mr->fmr_max_maps = save_mr.fmr_max_maps;
                e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

                ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
                                  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
                if (ret) {
                        u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
                        memcpy(&e_mr->flags, &(save_mr.flags),
                               sizeof(struct ehca_mr) - offset);
                        goto ehca_rereg_mr_exit0;
                }
        }

ehca_rereg_mr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
                         "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
                         "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
                         "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
                         acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
                         rereg_1_hcall, rereg_3_hcall);
        return ret;
} /* end ehca_rereg_mr() */
/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		/* restore the FMR attributes saved before deletenew */
		memcpy(&e_fmr->flags, &(save_fmr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */
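
/*
 * Editor's note (derived from the function above): unmapping an FMR
 * prefers the cheap path - a reregister hcall with len=0 - which only
 * works while fmr_max_pages fits into MAX_RPAGES. If that hcall fails,
 * or the FMR is too large, the FMR is freed and registered from scratch
 * with an empty EHCA_MR_PGI_FMR pginfo, preserving the FMR attributes
 * saved in save_fmr.
 */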
/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

static inline void *ehca_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHCA_DIR_INDEX_SHIFT;
	ret |= top << EHCA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}
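
/*
 * Illustration (editor's sketch, not part of the original source): the
 * (top, dir, idx) triple is re-assembled into a linear section number and
 * shifted by SECTION_SIZE_BITS to get the section's absolute address.
 * Assuming, purely for the example, EHCA_DIR_INDEX_SHIFT == 13,
 * EHCA_TOP_INDEX_SHIFT == 26 and SECTION_SIZE_BITS == 24 (16 MB
 * sections), top=1, dir=2, idx=3 yields section number
 * (1 << 26) | (2 << 13) | 3, and the section base is that value shifted
 * left by 24 bits, converted back to a kernel virtual address via
 * abs_to_virt().
 */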
#define ehca_bmap_valid(entry) \
	((u64)entry != (u64)EHCA_INVAL_ADDR)

static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
			       struct ehca_shca *shca, struct ehca_mr *mr,
			       struct ehca_mr_pginfo *pginfo)
{
	u64 h_ret = 0;
	unsigned long page = 0;
	u64 rpage = virt_to_abs(kpage);
	int page_count;
	void *sectbase = ehca_calc_sectbase(top, dir, idx);

	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
		ehca_err(&shca->ib_device, "reg_mr_section will probably "
			 "fail: section start address is not aligned "
			 "to hwpage_size");
	}
	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;

	while (page < page_count) {
		u64 rnum;
		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
		     rnum++) {
			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
			kpage[rnum] = virt_to_abs(pg);
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
			ehca_err(&shca->ib_device, "register_rpage_mr failed");
			return h_ret;
		}
	}
	return h_ret;
}
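
/*
 * Editor's note (derived from the loop above): the hwpages of one memory
 * section are registered in batches. Up to MAX_RPAGES absolute page
 * addresses are collected in the kpage firmware control block and handed
 * to the hypervisor with a single hipz_h_register_rpage_mr() hcall.
 * H_PAGE_REGISTERED appears to mean "batch accepted, more pages
 * expected", while H_SUCCESS closes the registration; both are treated
 * as success here.
 */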
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
				struct ehca_shca *shca, struct ehca_mr *mr,
				struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
			continue;

		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
					   pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
				    struct ehca_mr *mr,
				    struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
			continue;

		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	if (!ehca_bmap) {
		ret = -EFAULT;
		goto ehca_reg_internal_maxmr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = ehca_mr_len;
	iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */
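
/*
 * Worked example (editor's sketch): NUM_CHUNKS, defined elsewhere in this
 * driver, is the usual round-up division (length + chunk_size - 1) /
 * chunk_size. With a 4 KB PAGE_SIZE, size_maxmr = 0x5000 and an
 * iova_start that sits 0x800 into a page, num_kpages =
 * NUM_CHUNKS(0x800 + 0x5000, 0x1000) = 6: the leading offset pushes the
 * five-page region across a sixth page frame. The same computation with
 * hw_pgsize yields num_hwpages.
 */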
/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL;	/* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/
/*
 * check physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%llx pbuf->size=%llx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
				     "pbuf->size=%llx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
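
/*
 * Editor's summary of the checks above: the buffer list must describe one
 * virtually contiguous region, so only the edges may be ragged. The first
 * buffer may start mid-page but must share its page offset with
 * iova_start, and must end on a page boundary unless it is also the last
 * buffer; every later buffer must start page-aligned; every buffer that
 * is neither first nor last must be a whole multiple of PAGE_SIZE; and no
 * buffer may be empty.
 */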
/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */
/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      pginfo->hwpage_size));
			if (!(*kpage)) {
				ehca_gen_err("pgaddr=%llx "
					     "chunk->page_list[i]=%llx "
					     "i=%x next_hwpage=%llx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
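
/*
 * Editor's note (derived from the loop above): in the "user1" case each
 * kernel page holds hwpages_per_kpage hardware pages, so next_hwpage
 * walks the sub-pages of the current umem entry, and the entry index i
 * only advances once a full kernel page worth of hwpages has been
 * emitted. The cursor state (next_chunk, next_nmap, next_hwpage) lives in
 * pginfo so that the next call resumes exactly where this one stopped.
 */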
/*
 * check the given pages for contiguous layout;
 * the last page address is returned in prev_pgaddr for further checks
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
				     int start_idx, int end_idx,
				     u64 *prev_pgaddr)
{
	int t;
	for (t = start_idx; t <= end_idx; t++) {
		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
		if (ehca_debug_level >= 3)
			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%llx "
				     "prev_pgaddr=%llx page_list_i=%x",
				     pgaddr, *prev_pgaddr, t);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = (page_to_pfn(sg_page(&chunk->page_list[i]))
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr);
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%llx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%llx i=%x "
							"mr_pgsize=%llx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				if (ehca_debug_level >= 3) {
					u64 val = *(u64 *)abs_to_virt(
						phys_to_abs(pgaddr));
					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
						     "value=%016llx",
						     *kpage, pgaddr, val);
				}
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret)
					return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}

			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret)
				return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
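
/*
 * Editor's note (derived from the function above): "user2" is the mirror
 * image of "user1" - one hwpage spans kpages_per_hwpage kernel pages, so
 * a hwpage is emitted only after that many physically contiguous kernel
 * pages have been collected (verified via ehca_check_kpages_per_ate()).
 * The only address allowed to be unaligned to hwpage_size is the very
 * first page of the MR, whose leading kernel pages are accounted for in
 * kpage_cnt and subtracted from the first hwpage's quota.
 */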
static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
				 u32 number, u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				    pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%llx num_kpages=%llx "
					     "hwpage_cnt=%llx "
					     "num_hwpages=%llx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
					     "next_hwpage=%llx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number)
				break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
				u32 number, u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage * pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
				     "next_listelem=%llx next_hwpage=%llx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if addresses are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%llx p=%llx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */
/*----------------------------------------------------------------------*/

/*
 * check if MR is a max-MR, i.e. one that spans all of memory;
 * returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as a max-MR only if it matches the following: */
	if ((size == ehca_mr_len) &&
	    (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */
/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}

static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
				     int dir)
{
	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
		ehca_top_bmap->dir[dir] =
			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
		if (!ehca_top_bmap->dir[dir])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
	}
	return 0;
}

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
{
	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
		ehca_bmap->top[top] =
			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
		if (!ehca_bmap->top[top])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
	}
	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
}

static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}
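
/*
 * Illustration (editor's sketch): ehca_calc_index() extracts one level of
 * the three-level busmap index from a section number or address. For a
 * section number i, top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT),
 * dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT), and idx is the low bits
 * masked with EHCA_INDEX_MASK - the exact inverse of the composition in
 * ehca_calc_sectbase() above.
 */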
void ehca_destroy_busmap(void)
{
	int top, dir;

	if (!ehca_bmap)
		return;

	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
				continue;

			kfree(ehca_bmap->top[top]->dir[dir]);
		}

		kfree(ehca_bmap->top[top]);
	}

	kfree(ehca_bmap);
	ehca_bmap = NULL;
}

static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i, start_section, end_section;
	int top, dir, idx;

	if (!nr_pages)
		return 0;

	if (!ehca_bmap) {
		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
		if (!ehca_bmap)
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
	}

	start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
	end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
	for (i = start_section; i < end_section; i++) {
		int ret;
		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
		idx = i & EHCA_INDEX_MASK;

		ret = ehca_init_bmap(ehca_bmap, top, dir);
		if (ret) {
			ehca_destroy_busmap();
			return ret;
		}
		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
		ehca_mr_len += EHCA_SECTSIZE;
	}
	return 0;
}
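
/*
 * Editor's note (derived from the function above): each RAM section gets
 * an entry holding its offset into a densely packed "bus" address space;
 * ehca_mr_len grows by EHCA_SECTSIZE per section and ends up as the total
 * mapped length, which ehca_reg_internal_maxmr() later uses as the size
 * of the internal max-MR. Memory holes simply keep EHCA_INVAL_ADDR and
 * are skipped by ehca_map_vaddr().
 */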
static int ehca_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
		return 0;

	return 1;
}
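
/*
 * Editor's note (derived from the checks above): a pfn counts as the
 * start of a hugepage only if it is aligned to the hugepage boundary
 * (EHCA_HUGEPAGE_PFN_MASK) and heads a compound page whose order matches
 * EHCA_HUGEPAGESHIFT - the 16GB hugepage size the callback below is
 * scanning for.
 */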
static int ehca_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
		return ehca_update_busmap(initial_pfn, total_nr_pages);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehca_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehca_update_busmap(start_pfn, nr_pages);
			if (ret)
				return ret;
			/* Skip the hugepage */
			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehca_update_busmap(start_pfn, nr_pages);
}

int ehca_create_busmap(void)
{
	int ret;

	ehca_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehca_create_busmap_callback);
	return ret;
}

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo)
{
	int top;
	u64 hret = H_SUCCESS;	/* also covers an all-invalid (empty) bmap */
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		return -ENOMEM;
	}
	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	ehca_free_fw_ctrlblock(kpage);

	if (hret == H_SUCCESS)
		return 0; /* Everything is fine */
	else {
		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
			 e_mr->ib.ib_mr.lkey,
			 shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle);
		return ehca2ib_return_code(hret);
	}
}
static u64 ehca_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long abs_addr, offset;
	u64 entry;

	if (!ehca_bmap)
		return EHCA_INVAL_ADDR;

	abs_addr = virt_to_abs(caddr);
	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]))
		return EHCA_INVAL_ADDR;

	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
		return EHCA_INVAL_ADDR;

	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
	if (ehca_bmap_valid(entry)) {
		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
		return entry | offset;
	} else
		return EHCA_INVAL_ADDR;
}
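
/*
 * Worked example (editor's sketch): translating a kernel virtual address
 * walks the same three levels that ehca_update_busmap() filled in. The
 * absolute address is split into (top, dir, idx) by shifting out
 * EHCA_SECTSHIFT plus the per-level index shifts; the entry found there
 * is the section's offset in the packed bus space, and the low
 * EHCA_SECTSIZE-1 bits of the original address are OR-ed back in as the
 * offset within the section. A miss at any level yields EHCA_INVAL_ADDR,
 * which the DMA mapping ops below report via ehca_dma_mapping_error().
 */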
static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == EHCA_INVAL_ADDR;
}

static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
			       size_t size, enum dma_data_direction direction)
{
	if (cpu_addr)
		return ehca_map_vaddr(cpu_addr);
	else
		return EHCA_INVAL_ADDR;
}

static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	u64 addr;

	if (offset + size > PAGE_SIZE)
		return EHCA_INVAL_ADDR;

	addr = ehca_map_vaddr(page_address(page));
	if (!ehca_dma_mapping_error(dev, addr))
		addr += offset;

	return addr;
}

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		u64 addr;
		addr = ehca_map_vaddr(sg_virt(sg));
		if (ehca_dma_mapping_error(dev, addr))
			return 0;

		sg->dma_address = addr;
		sg->dma_length = sg->length;
	}
	return nents;
}

static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->dma_address;
}

static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->length;
}

static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
				     u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;
	u64 dma_addr;

	p = alloc_pages(flag, get_order(size));
	if (p) {
		addr = page_address(p);
		dma_addr = ehca_map_vaddr(addr);
		if (ehca_dma_mapping_error(dev, dma_addr)) {
			free_pages((unsigned long)addr, get_order(size));
			return NULL;
		}
		if (dma_handle)
			*dma_handle = dma_addr;
		return addr;
	}
	return NULL;
}

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
				   void *cpu_addr, u64 dma_handle)
{
	if (cpu_addr && size)
		free_pages((unsigned long)cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
	.mapping_error          = ehca_dma_mapping_error,
	.map_single             = ehca_dma_map_single,
	.unmap_single           = ehca_dma_unmap_single,
	.map_page               = ehca_dma_map_page,
	.unmap_page             = ehca_dma_unmap_page,
	.map_sg                 = ehca_dma_map_sg,
	.unmap_sg               = ehca_dma_unmap_sg,
	.dma_address            = ehca_dma_address,
	.dma_len                = ehca_dma_len,
	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
	.sync_single_for_device = ehca_dma_sync_single_for_device,
	.alloc_coherent         = ehca_dma_alloc_coherent,
	.free_coherent          = ehca_dma_free_coherent,
};