ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>
#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))
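/*
 * NUM_CHUNKS() rounds up, e.g. NUM_CHUNKS(5000, 4096) == 2. Callers add
 * the offset of the start address into its first page to the length, so
 * partially covered first/last pages count as whole chunks.
 */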
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

/* DMEM toleration management */
#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)

static unsigned long ehca_mr_len;

/*
 * Memory map data structures
 */
struct ehca_dir_bmap {
	u64 ent[EHCA_MAP_ENTRIES];
};
struct ehca_top_bmap {
	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
};
struct ehca_bmap {
	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
};
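/*
 * Three-level map used for DMEM toleration: ehca_bmap->top[] and
 * ehca_top_bmap->dir[] are each indexed by an EHCA_DIR_INDEX_SHIFT-bit
 * (13-bit) slice of the section number, i.e. 8k entries per 64k block,
 * and ehca_dir_bmap->ent[] holds the translated address of one memory
 * section.
 */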
static struct ehca_bmap *ehca_bmap;

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24

static u64 ehca_map_vaddr(void *caddr);
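/*
 * Encode a hardware page size for the hcall interface: the supported
 * sizes 4K/64K/1M/16M are log2 steps of 4 apart and are encoded as
 * 0, 1, 2, 3 respectively.
 */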
static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);
	WARN_ON(log < 12 || log > 24 || log & 3);
	return (log - 12) / 4;
}
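
/*
 * shca->hca_cap_mr_pgsize is a bitmask of the MR page sizes supported
 * by the HCA; rounding down to a power of two picks the largest one.
 */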
static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
}

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
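/*
 * The "DMA MR" handed out here relies on the internal max-MR
 * (shca->maxmr) set up at device init; ehca_reg_maxmr() derives the
 * returned MR from it, covering all of kernel memory starting at
 * KERNELBASE + PHYSICAL_START.
 */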
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr,
				     (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;
		u64 hw_pgsize;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		/* for kernel space we try most possible pgsize */
		hw_pgsize = ehca_get_max_hwpage_size(shca);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
					 hw_pgsize);
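		/*
		 * num_kpages counts kernel pages (PAGE_SIZE), num_hwpages
		 * counts HCA hardware pages; both add the offset of
		 * iova_start into its first page, so partially covered
		 * pages at either end count in full.
		 */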
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret, page_shift;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%llx "
			 "virt_base=%llx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags, 0);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	page_shift = PAGE_SHIFT;
	if (e_mr->umem->hugetlb) {
		/* determine page_shift, clamp between 4K and 16M */
		page_shift = (fls64(length - 1) + 3) & ~3;
		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
				 EHCA_MR_PGSHIFT16M);
	}
	hwpage_size = 1UL << page_shift;

	/* now that we have the desired page size, shift until it's
	 * supported, too. 4K is always supported, so this terminates.
	 */
	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
		hwpage_size >>= 4;
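	/*
	 * Supported sizes differ by a factor of 16 (4K, 64K, 1M, 16M),
	 * hence the shift by 4 bits per step down.
	 */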
reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%llx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * this means kpages are not contiguous for a hw page
		 * try kernel page size as fallback solution
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%llx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
					 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
			 "shca=%p hca_hndl=%llx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	hw_pgsize = 1 << fmr_attr->page_shift;
	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.hwpage_size = hw_pgsize;
	/*
	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
			 "iova=%llx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo);

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey, /*OUT*/
		enum ehca_reg_type reg_type)
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
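	/*
	 * ehca_use_hp_mr is a module option; when set, bit 0 of the hipz
	 * ACL asks the firmware for a "high performance" MR.
	 */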
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	if (reg_type == EHCA_REG_BUSMAP_MR)
		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
	else if (reg_type == EHCA_REG_MR)
		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	else
		ret = -EINVAL;

	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%i rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}
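		/*
		 * With more than one page to register, pass the physical
		 * address of the kpage list; with exactly one page, pass
		 * the page address itself in rpage.
		 */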
		if (rnum > 1) {
			rpage = __pa(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lli "
					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
				 "mr_hndl=%llx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = __pa(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}
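	/*
	 * Rereg1 passes the whole page list in a single firmware control
	 * block of at most MAX_RPAGES entries; larger regions, and
	 * registrations that grow the region, must take the 3-hcall path
	 * below (free the old MR, allocate a new one, register its pages
	 * in chunks).
	 */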
	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;
	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
  1301. memcpy(&e_fmr->flags, &(save_mr.flags),
  1302. sizeof(struct ehca_mr) - offset);
  1303. }
  1304. ehca_unmap_one_fmr_exit0:
  1305. if (ret)
  1306. ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
  1307. "fmr_max_pages=%x",
  1308. ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
  1309. return ret;
  1310. } /* end ehca_unmap_one_fmr() */
  1311. /*----------------------------------------------------------------------*/
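/*
 * ehca_reg_smr() below registers e_newmr as a shared MR on top of
 * e_origmr: the already registered pages of the original MR are reused
 * and only a new handle and key pair are produced, as the field copies
 * in the function suggest. A minimal caller sketch (hypothetical values):
 *
 *      u32 lkey, rkey;
 *      ret = ehca_reg_smr(shca, e_origmr, e_newmr, iova_start,
 *                         IB_ACCESS_LOCAL_WRITE, e_pd, &lkey, &rkey);
 */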
int ehca_reg_smr(struct ehca_shca *shca,
                 struct ehca_mr *e_origmr,
                 struct ehca_mr *e_newmr,
                 u64 *iova_start,
                 int acl,
                 struct ehca_pd *e_pd,
                 u32 *lkey, /*OUT*/
                 u32 *rkey) /*OUT*/
{
        int ret = 0;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
                         "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
                         "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
                         h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
                         shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_reg_smr_exit0;
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_smr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
                         "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
                         ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
        return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/
static inline void *ehca_calc_sectbase(int top, int dir, int idx)
{
        unsigned long ret = idx;

        ret |= dir << EHCA_DIR_INDEX_SHIFT;
        ret |= top << EHCA_TOP_INDEX_SHIFT;
        return __va(ret << SECTION_SIZE_BITS);
}
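/*
 * Worked example (assuming 16MB memory sections, i.e. SECTION_SIZE_BITS
 * of 24, and the EHCA_*_INDEX_SHIFT values from the headers): for top=1,
 * dir=2, idx=3 the composed section number is
 *
 *      (1 << EHCA_TOP_INDEX_SHIFT) | (2 << EHCA_DIR_INDEX_SHIFT) | 3
 *
 * and ehca_calc_sectbase() returns the kernel virtual address of that
 * section, i.e. __va(section_nr << SECTION_SIZE_BITS).
 */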
#define ehca_bmap_valid(entry) \
        ((u64)entry != (u64)EHCA_INVAL_ADDR)

static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
                               struct ehca_shca *shca, struct ehca_mr *mr,
                               struct ehca_mr_pginfo *pginfo)
{
        u64 h_ret = 0;
        unsigned long page = 0;
        u64 rpage = __pa(kpage);
        int page_count;
        void *sectbase = ehca_calc_sectbase(top, dir, idx);

        if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
                ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
                         "hwpage_size does not fit to "
                         "section start address");
        }
        page_count = EHCA_SECTSIZE / pginfo->hwpage_size;

        while (page < page_count) {
                u64 rnum;

                for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
                     rnum++) {
                        void *pg = sectbase + ((page++) * pginfo->hwpage_size);
                        kpage[rnum] = __pa(pg);
                }

                h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
                        ehca_encode_hwpage_size(pginfo->hwpage_size),
                        0, rpage, rnum);
                if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
                        ehca_err(&shca->ib_device, "register_rpage_mr failed");
                        return h_ret;
                }
        }
        return h_ret;
}
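/*
 * Note on the loop above: one section is registered in bursts of at most
 * MAX_RPAGES physical page addresses per hipz_h_register_rpage_mr() call;
 * the firmware answers H_PAGE_REGISTERED for intermediate bursts and
 * H_SUCCESS once the MR is complete. Accounting sketch, assuming a 16MB
 * section, 4KB hwpages and MAX_RPAGES of 512 (one kpage of u64 addresses):
 *
 *      page_count = EHCA_SECTSIZE / hwpage_size;           // 4096 hwpages
 *      bursts     = DIV_ROUND_UP(page_count, MAX_RPAGES);  // 8 hcalls
 */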
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
                                struct ehca_shca *shca, struct ehca_mr *mr,
                                struct ehca_mr_pginfo *pginfo)
{
        u64 hret = H_SUCCESS;
        int idx;

        for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
                        continue;

                hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
                                           pginfo);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}

static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
                                    struct ehca_mr *mr,
                                    struct ehca_mr_pginfo *pginfo)
{
        u64 hret = H_SUCCESS;
        int dir;

        for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
                if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
                        continue;

                hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
                        return hret;
        }
        return hret;
}
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
        struct ehca_shca *shca,
        struct ehca_pd *e_pd,
        struct ehca_mr **e_maxmr)  /*OUT*/
{
        int ret;
        struct ehca_mr *e_mr;
        u64 *iova_start;
        u64 size_maxmr;
        struct ehca_mr_pginfo pginfo;
        struct ib_phys_buf ib_pbuf;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hw_pgsize;

        if (!ehca_bmap) {
                ret = -EFAULT;
                goto ehca_reg_internal_maxmr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(&shca->ib_device, "out of memory");
                ret = -ENOMEM;
                goto ehca_reg_internal_maxmr_exit0;
        }
        e_mr->flags |= EHCA_MR_FLAG_MAXMR;

        /* register internal max-MR on HCA */
        size_maxmr = ehca_mr_len;
        iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
        ib_pbuf.addr = 0;
        ib_pbuf.size = size_maxmr;
        num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
                                PAGE_SIZE);
        hw_pgsize = ehca_get_max_hwpage_size(shca);
        num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
                                 hw_pgsize);

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_PHYS;
        pginfo.num_kpages = num_kpages;
        pginfo.num_hwpages = num_hwpages;
        pginfo.hwpage_size = hw_pgsize;
        pginfo.u.phy.num_phys_buf = 1;
        pginfo.u.phy.phys_buf_array = &ib_pbuf;

        ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
                          &pginfo, &e_mr->ib.ib_mr.lkey,
                          &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
        if (ret) {
                ehca_err(&shca->ib_device, "reg of internal max MR failed, "
                         "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
                         "num_hwpages=%x", e_mr, iova_start, size_maxmr,
                         num_kpages, num_hwpages);
                goto ehca_reg_internal_maxmr_exit1;
        }

        /* successful registration of all pages */
        e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
        e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
        e_mr->ib.ib_mr.uobject = NULL;
        atomic_inc(&(e_pd->ib_pd.usecnt));
        atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
        *e_maxmr = e_mr;
        return 0;

ehca_reg_internal_maxmr_exit1:
        ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
                         ret, shca, e_pd, e_maxmr);
        return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/
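/*
 * The internal max-MR registered above covers every section recorded in
 * ehca_bmap (ehca_mr_len bytes of bus space). A hedged sketch of the
 * driver-internal call, roughly as the shca initialization path issues it:
 *
 *      ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
 *
 * ehca_reg_maxmr() below then derives user-visible max-MRs from it as
 * shared MRs.
 */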
int ehca_reg_maxmr(struct ehca_shca *shca,
                   struct ehca_mr *e_newmr,
                   u64 *iova_start,
                   int acl,
                   struct ehca_pd *e_pd,
                   u32 *lkey,
                   u32 *rkey)
{
        u64 h_ret;
        struct ehca_mr *e_origmr = shca->maxmr;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
                         "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
                         h_ret, e_origmr, shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                return ehca2ib_return_code(h_ret);
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
        int ret;
        struct ehca_mr *e_maxmr;
        struct ib_pd *ib_pd;

        if (!shca->maxmr) {
                ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
                ret = -EINVAL;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        e_maxmr = shca->maxmr;
        ib_pd = e_maxmr->ib.ib_mr.pd;
        shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

        ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
        if (ret) {
                ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
                         "ret=%i e_maxmr=%p shca=%p lkey=%x",
                         ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
                shca->maxmr = e_maxmr;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
                         ret, shca, shca->maxmr);
        return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/
/*
 * check physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
                                  int num_phys_buf,
                                  u64 *iova_start,
                                  u64 *size)
{
        struct ib_phys_buf *pbuf = phys_buf_array;
        u64 size_count = 0;
        u32 i;

        if (num_phys_buf == 0) {
                ehca_gen_err("bad phys buf array len, num_phys_buf=0");
                return -EINVAL;
        }
        /* check first buffer */
        if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
                ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
                             "pbuf->addr=%llx pbuf->size=%llx",
                             iova_start, pbuf->addr, pbuf->size);
                return -EINVAL;
        }
        if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
            (num_phys_buf > 1)) {
                ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
                             "pbuf->size=%llx", pbuf->addr, pbuf->size);
                return -EINVAL;
        }

        for (i = 0; i < num_phys_buf; i++) {
                if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
                        ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
                                     "pbuf->size=%llx",
                                     i, pbuf->addr, pbuf->size);
                        return -EINVAL;
                }
                if (((i > 0) &&                    /* not 1st */
                     (i < (num_phys_buf - 1)) &&   /* not last */
                     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
                        ehca_gen_err("bad size, i=%x pbuf->size=%llx",
                                     i, pbuf->size);
                        return -EINVAL;
                }
                size_count += pbuf->size;
                pbuf++;
        }

        *size = size_count;
        return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/
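/*
 * Example of a phys_buf_array that passes the checks above (hypothetical
 * addresses, 4KB PAGE_SIZE, iova_start ending in 0x800): the first buffer
 * may start unaligned but must end on a page boundary when more buffers
 * follow; middle buffers must be page aligned in addr and size; the last
 * buffer may have any non-zero size.
 *
 *      struct ib_phys_buf pbufs[] = {
 *              { .addr = 0x10000800, .size = 0x0800 },  // ends page aligned
 *              { .addr = 0x20000000, .size = 0x2000 },  // fully aligned
 *              { .addr = 0x30000000, .size = 0x0123 },  // last, any size
 *      };
 */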
/* check page list of map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
                             u64 *page_list,
                             int list_len)
{
        u32 i;
        u64 *page;

        if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
                ehca_gen_err("bad list_len, list_len=%x "
                             "e_fmr->fmr_max_pages=%x fmr=%p",
                             list_len, e_fmr->fmr_max_pages, e_fmr);
                return -EINVAL;
        }

        /* each page must be aligned */
        page = page_list;
        for (i = 0; i < list_len; i++) {
                if (*page % e_fmr->fmr_page_size) {
                        ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
                                     "fmr_page_size=%x", i, *page, page, e_fmr,
                                     e_fmr->fmr_page_size);
                        return -EINVAL;
                }
                page++;
        }
        return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/
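/*
 * Example check (hypothetical values): with fmr_page_size = 4KB, a page
 * list { 0x100000, 0x101000, 0x102800 } fails, because 0x102800 is not a
 * multiple of the FMR page size; the first two entries pass.
 */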
/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
                                  u32 number,
                                  u64 *kpage)
{
        int ret = 0;
        struct ib_umem_chunk *prev_chunk;
        struct ib_umem_chunk *chunk;
        u64 pgaddr;
        u32 i = 0;
        u32 j = 0;
        int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

        /* loop over desired chunk entries */
        chunk      = pginfo->u.usr.next_chunk;
        prev_chunk = pginfo->u.usr.next_chunk;
        list_for_each_entry_continue(
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
                        pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
                                << PAGE_SHIFT;
                        *kpage = pgaddr + (pginfo->next_hwpage *
                                           pginfo->hwpage_size);
                        if (!(*kpage)) {
                                ehca_gen_err("pgaddr=%llx "
                                             "chunk->page_list[i]=%llx "
                                             "i=%x next_hwpage=%llx",
                                             pgaddr, (u64)sg_dma_address(
                                                     &chunk->page_list[i]),
                                             i, pginfo->next_hwpage);
                                return -EFAULT;
                        }
                        (pginfo->hwpage_cnt)++;
                        (pginfo->next_hwpage)++;
                        kpage++;
                        if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
                                (pginfo->kpage_cnt)++;
                                (pginfo->u.usr.next_nmap)++;
                                pginfo->next_hwpage = 0;
                                i++;
                        }
                        j++;
                        if (j >= number)
                                break;
                }
                if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
                    (j >= number)) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                        break;
                } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                } else if (j >= number)
                        break;
                else
                        prev_chunk = chunk;
        }
        pginfo->u.usr.next_chunk =
                list_prepare_entry(prev_chunk,
                                   (&(pginfo->u.usr.region->chunk_list)),
                                   list);
        return ret;
}
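/*
 * Accounting sketch for the PAGE_SIZE >= hwpage_size case above: each
 * kernel page contributes hwpages_per_kpage entries, so once no partial
 * kernel page is in flight the counters satisfy
 *
 *      hwpage_cnt == kpage_cnt * (PAGE_SIZE / hwpage_size)
 *
 * next_hwpage tracks the position inside the current kernel page and
 * wraps to 0 whenever a full kpage worth of hwpages has been emitted.
 */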
/*
 * check given pages for contiguous layout
 * last page addr is returned in prev_pgaddr for further check
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
                                     int start_idx, int end_idx,
                                     u64 *prev_pgaddr)
{
        int t;

        for (t = start_idx; t <= end_idx; t++) {
                u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;

                if (ehca_debug_level >= 3)
                        ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
                                     *(u64 *)__va(pgaddr));
                if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
                        ehca_gen_err("uncontiguous page found pgaddr=%llx "
                                     "prev_pgaddr=%llx page_list_i=%x",
                                     pgaddr, *prev_pgaddr, t);
                        return -EINVAL;
                }
                *prev_pgaddr = pgaddr;
        }
        return 0;
}
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                                  u32 number,
                                  u64 *kpage)
{
        int ret = 0;
        struct ib_umem_chunk *prev_chunk;
        struct ib_umem_chunk *chunk;
        u64 pgaddr, prev_pgaddr;
        u32 i = 0;
        u32 j = 0;
        int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
        int nr_kpages = kpages_per_hwpage;

        /* loop over desired chunk entries */
        chunk      = pginfo->u.usr.next_chunk;
        prev_chunk = pginfo->u.usr.next_chunk;
        list_for_each_entry_continue(
                chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
                for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
                        if (nr_kpages == kpages_per_hwpage) {
                                pgaddr = (page_to_pfn(sg_page(&chunk->page_list[i]))
                                          << PAGE_SHIFT);
                                *kpage = pgaddr;
                                if (!(*kpage)) {
                                        ehca_gen_err("pgaddr=%llx i=%x",
                                                     pgaddr, i);
                                        ret = -EFAULT;
                                        return ret;
                                }
                                /*
                                 * The first page in a hwpage must be aligned;
                                 * the first MR page is exempt from this rule.
                                 */
                                if (pgaddr & (pginfo->hwpage_size - 1)) {
                                        if (pginfo->hwpage_cnt) {
                                                ehca_gen_err(
                                                        "invalid alignment "
                                                        "pgaddr=%llx i=%x "
                                                        "mr_pgsize=%llx",
                                                        pgaddr, i,
                                                        pginfo->hwpage_size);
                                                ret = -EFAULT;
                                                return ret;
                                        }
                                        /* first MR page */
                                        pginfo->kpage_cnt =
                                                (pgaddr &
                                                 (pginfo->hwpage_size - 1)) >>
                                                        PAGE_SHIFT;
                                        nr_kpages -= pginfo->kpage_cnt;
                                        *kpage = pgaddr &
                                                 ~(pginfo->hwpage_size - 1);
                                }
                                if (ehca_debug_level >= 3) {
                                        u64 val = *(u64 *)__va(pgaddr);
                                        ehca_gen_dbg("kpage=%llx chunk_page=%llx "
                                                     "value=%016llx",
                                                     *kpage, pgaddr, val);
                                }
                                prev_pgaddr = pgaddr;
                                i++;
                                pginfo->kpage_cnt++;
                                pginfo->u.usr.next_nmap++;
                                nr_kpages--;
                                if (!nr_kpages)
                                        goto next_kpage;
                                continue;
                        }
                        if (i + nr_kpages > chunk->nmap) {
                                ret = ehca_check_kpages_per_ate(
                                        chunk->page_list, i,
                                        chunk->nmap - 1, &prev_pgaddr);
                                if (ret)
                                        return ret;
                                pginfo->kpage_cnt += chunk->nmap - i;
                                pginfo->u.usr.next_nmap += chunk->nmap - i;
                                nr_kpages -= chunk->nmap - i;
                                break;
                        }

                        ret = ehca_check_kpages_per_ate(chunk->page_list, i,
                                                        i + nr_kpages - 1,
                                                        &prev_pgaddr);
                        if (ret)
                                return ret;
                        i += nr_kpages;
                        pginfo->kpage_cnt += nr_kpages;
                        pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
                        nr_kpages = kpages_per_hwpage;
                        (pginfo->hwpage_cnt)++;
                        kpage++;
                        j++;
                        if (j >= number)
                                break;
                }
                if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
                    (j >= number)) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                        break;
                } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
                        pginfo->u.usr.next_nmap = 0;
                        prev_chunk = chunk;
                } else if (j >= number)
                        break;
                else
                        prev_chunk = chunk;
        }
        pginfo->u.usr.next_chunk =
                list_prepare_entry(prev_chunk,
                                   (&(pginfo->u.usr.region->chunk_list)),
                                   list);
        return ret;
}
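/*
 * Alignment rule illustrated (hypothetical numbers): with 64KB hwpages
 * and 4KB kernel pages, kpages_per_hwpage is 16. A user region whose
 * first kernel page has low bits 0x9000 may begin mid-hwpage only for
 * the very first MR page; the code then rounds *kpage down to the 64KB
 * boundary and shortens the first group to nr_kpages = 16 - 9 = 7
 * contiguous kernel pages. Every later hwpage must start exactly on a
 * 64KB boundary, or -EFAULT is returned.
 */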
static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
                                 u32 number, u64 *kpage)
{
        int ret = 0;
        struct ib_phys_buf *pbuf;
        u64 num_hw, offs_hw;
        u32 i = 0;

        /* loop over desired phys_buf_array entries */
        while (i < number) {
                pbuf   = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
                num_hw  = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
                                     pbuf->size, pginfo->hwpage_size);
                offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
                        pginfo->hwpage_size;
                while (pginfo->next_hwpage < offs_hw + num_hw) {
                        /* sanity check */
                        if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
                            (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
                                ehca_gen_err("kpage_cnt >= num_kpages, "
                                             "kpage_cnt=%llx num_kpages=%llx "
                                             "hwpage_cnt=%llx "
                                             "num_hwpages=%llx i=%x",
                                             pginfo->kpage_cnt,
                                             pginfo->num_kpages,
                                             pginfo->hwpage_cnt,
                                             pginfo->num_hwpages, i);
                                return -EFAULT;
                        }
                        *kpage = (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
                                 (pginfo->next_hwpage * pginfo->hwpage_size);
                        if (!(*kpage) && pbuf->addr) {
                                ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
                                             "next_hwpage=%llx", pbuf->addr,
                                             pbuf->size, pginfo->next_hwpage);
                                return -EFAULT;
                        }
                        (pginfo->hwpage_cnt)++;
                        (pginfo->next_hwpage)++;
                        if (PAGE_SIZE >= pginfo->hwpage_size) {
                                if (pginfo->next_hwpage %
                                    (PAGE_SIZE / pginfo->hwpage_size) == 0)
                                        (pginfo->kpage_cnt)++;
                        } else
                                pginfo->kpage_cnt += pginfo->hwpage_size /
                                        PAGE_SIZE;
                        kpage++;
                        i++;
                        if (i >= number)
                                break;
                }
                if (pginfo->next_hwpage >= offs_hw + num_hw) {
                        (pginfo->u.phy.next_buf)++;
                        pginfo->next_hwpage = 0;
                }
        }
        return ret;
}
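/*
 * Worked example for the num_hw computation above (hypothetical buffer):
 * pbuf->addr = 0x10000800 and pbuf->size = 0x2000 with 4KB hwpages give
 *
 *      num_hw = NUM_CHUNKS(0x800 + 0x2000, 0x1000) = 3
 *
 * i.e. the unaligned buffer straddles three hwpages even though its size
 * is only two pages.
 */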
static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
                                u32 number, u64 *kpage)
{
        int ret = 0;
        u64 *fmrlist;
        u32 i;

        /* loop over desired page_list entries */
        fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
        for (i = 0; i < number; i++) {
                *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
                         pginfo->next_hwpage * pginfo->hwpage_size;
                if (!(*kpage)) {
                        ehca_gen_err("*fmrlist=%llx fmrlist=%p "
                                     "next_listelem=%llx next_hwpage=%llx",
                                     *fmrlist, fmrlist,
                                     pginfo->u.fmr.next_listelem,
                                     pginfo->next_hwpage);
                        return -EFAULT;
                }
                (pginfo->hwpage_cnt)++;
                if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
                        if (pginfo->next_hwpage %
                            (pginfo->u.fmr.fmr_pgsize /
                             pginfo->hwpage_size) == 0) {
                                (pginfo->kpage_cnt)++;
                                (pginfo->u.fmr.next_listelem)++;
                                fmrlist++;
                                pginfo->next_hwpage = 0;
                        } else
                                (pginfo->next_hwpage)++;
                } else {
                        unsigned int cnt_per_hwpage = pginfo->hwpage_size /
                                pginfo->u.fmr.fmr_pgsize;
                        unsigned int j;
                        u64 prev = *kpage;

                        /* check if adrs are contiguous */
                        for (j = 1; j < cnt_per_hwpage; j++) {
                                u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);

                                if (prev + pginfo->u.fmr.fmr_pgsize != p) {
                                        ehca_gen_err("uncontiguous fmr pages "
                                                     "found prev=%llx p=%llx "
                                                     "idx=%x", prev, p, i + j);
                                        return -EINVAL;
                                }
                                prev = p;
                        }
                        pginfo->kpage_cnt += cnt_per_hwpage;
                        pginfo->u.fmr.next_listelem += cnt_per_hwpage;
                        fmrlist += cnt_per_hwpage;
                }
                kpage++;
        }
        return ret;
}
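/*
 * Note on the two branches above: when fmr_pgsize >= hwpage_size, one
 * page-list entry expands into one or more hwpages (next_hwpage iterates
 * inside the entry); when fmr_pgsize < hwpage_size, cnt_per_hwpage
 * consecutive entries are folded into a single hwpage, which is why they
 * are first verified to be physically contiguous. Example (hypothetical):
 * fmr_pgsize = 4KB with hwpage_size = 64KB folds 16 list entries per
 * hwpage.
 */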
/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
                     u32 number,
                     u64 *kpage)
{
        int ret;

        switch (pginfo->type) {
        case EHCA_MR_PGI_PHYS:
                ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
                break;
        case EHCA_MR_PGI_USER:
                ret = PAGE_SIZE >= pginfo->hwpage_size ?
                        ehca_set_pagebuf_user1(pginfo, number, kpage) :
                        ehca_set_pagebuf_user2(pginfo, number, kpage);
                break;
        case EHCA_MR_PGI_FMR:
                ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
                break;
        default:
                ehca_gen_err("bad pginfo->type=%x", pginfo->type);
                ret = -EFAULT;
                break;
        }
        return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/
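/*
 * Caller sketch (illustrative only, roughly how ehca_reg_mr_rpages()
 * drives this dispatcher): pages are pulled in kpage-sized batches until
 * num_hwpages entries have been produced.
 *
 *      for each batch:
 *              ret = ehca_set_pagebuf(pginfo, rnum, kpage);
 *              if (ret)
 *                      break;
 *              hipz_h_register_rpage_mr(..., kpage, rnum);
 */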
/*
 * check if MR is a max-MR, i.e. one that spans all of memory;
 * returns 1 for a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
                     u64 *iova_start)
{
        /* an MR is treated as max-MR only if it matches the following: */
        if ((size == ehca_mr_len) &&
            (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
                ehca_gen_dbg("this is a max-MR");
                return 1;
        } else
                return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/
/* map IB access control bits to hipz access control (used for MR and MW) */
void ehca_mrmw_map_acl(int ib_acl,
                       u32 *hipz_acl)
{
        *hipz_acl = 0;
        if (ib_acl & IB_ACCESS_REMOTE_READ)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
        if (ib_acl & IB_ACCESS_REMOTE_WRITE)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
        if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
                *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
        if (ib_acl & IB_ACCESS_LOCAL_WRITE)
                *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
        if (ib_acl & IB_ACCESS_MW_BIND)
                *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets the hw page size code (bits 31:24) in hipz access control for MR/MW */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
        *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/* reverse map hipz access control bits to IB access control (MR and MW) */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
                               int *ib_acl) /*OUT*/
{
        *ib_acl = 0;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
                *ib_acl |= IB_ACCESS_REMOTE_READ;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
                *ib_acl |= IB_ACCESS_REMOTE_WRITE;
        if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
                *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
        if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
                *ib_acl |= IB_ACCESS_LOCAL_WRITE;
        if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
                *ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor: used by the Reregister MR verb; sets
 * all fields of ehca_mr_t to 0, except struct ib_mr and the spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
        mr->flags = 0;
        mr->num_kpages = 0;
        mr->num_hwpages = 0;
        mr->acl = 0;
        mr->start = NULL;
        mr->fmr_page_size = 0;
        mr->fmr_max_pages = 0;
        mr->fmr_max_maps = 0;
        mr->fmr_map_cnt = 0;
        memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
        memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
        mr_cache = kmem_cache_create("ehca_cache_mr",
                                     sizeof(struct ehca_mr), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL);
        if (!mr_cache)
                return -ENOMEM;
        mw_cache = kmem_cache_create("ehca_cache_mw",
                                     sizeof(struct ehca_mw), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL);
        if (!mw_cache) {
                kmem_cache_destroy(mr_cache);
                mr_cache = NULL;
                return -ENOMEM;
        }
        return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
        if (mr_cache)
                kmem_cache_destroy(mr_cache);
        if (mw_cache)
                kmem_cache_destroy(mw_cache);
}

static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
                                     int dir)
{
        if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
                ehca_top_bmap->dir[dir] =
                        kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
                if (!ehca_top_bmap->dir[dir])
                        return -ENOMEM;
                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
                memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
        }
        return 0;
}

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
{
        if (!ehca_bmap_valid(ehca_bmap->top[top])) {
                ehca_bmap->top[top] =
                        kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
                if (!ehca_bmap->top[top])
                        return -ENOMEM;
                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
                memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
        }
        return ehca_init_top_bmap(ehca_bmap->top[top], dir);
}

static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
        return (i >> s) & EHCA_INDEX_MASK;
}
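/*
 * Busmap lookup example (assuming the EHCA_*_INDEX_SHIFT values from the
 * headers): a memory section number s is split into the three map levels
 * as
 *
 *      top = ehca_calc_index(s, EHCA_TOP_INDEX_SHIFT);
 *      dir = ehca_calc_index(s, EHCA_DIR_INDEX_SHIFT);
 *      idx = s & EHCA_INDEX_MASK;
 *
 * which mirrors the ehca_bmap->top[]->dir[]->ent[] walk used throughout
 * this file.
 */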
void ehca_destroy_busmap(void)
{
        int top, dir;

        if (!ehca_bmap)
                return;

        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
                if (!ehca_bmap_valid(ehca_bmap->top[top]))
                        continue;
                for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
                        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
                                continue;

                        kfree(ehca_bmap->top[top]->dir[dir]);
                }

                kfree(ehca_bmap->top[top]);
        }

        kfree(ehca_bmap);
        ehca_bmap = NULL;
}

static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long i, start_section, end_section;
        int top, dir, idx;

        if (!nr_pages)
                return 0;

        if (!ehca_bmap) {
                ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
                if (!ehca_bmap)
                        return -ENOMEM;
                /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
                memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
        }

        start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
        end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
        for (i = start_section; i < end_section; i++) {
                int ret;

                top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
                dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
                idx = i & EHCA_INDEX_MASK;

                ret = ehca_init_bmap(ehca_bmap, top, dir);
                if (ret) {
                        ehca_destroy_busmap();
                        return ret;
                }
                ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
                ehca_mr_len += EHCA_SECTSIZE;
        }
        return 0;
}

static int ehca_is_hugepage(unsigned long pfn)
{
        int page_order;

        if (pfn & EHCA_HUGEPAGE_PFN_MASK)
                return 0;

        page_order = compound_order(pfn_to_page(pfn));
        if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
                return 0;

        return 1;
}
static int ehca_create_busmap_callback(unsigned long initial_pfn,
                                       unsigned long total_nr_pages, void *arg)
{
        int ret;
        unsigned long pfn, start_pfn, end_pfn, nr_pages;

        if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
                return ehca_update_busmap(initial_pfn, total_nr_pages);

        /* Given chunk is >= 16GB -> check for hugepages */
        start_pfn = initial_pfn;
        end_pfn = initial_pfn + total_nr_pages;
        pfn = start_pfn;

        while (pfn < end_pfn) {
                if (ehca_is_hugepage(pfn)) {
                        /* Add mem found in front of the hugepage */
                        nr_pages = pfn - start_pfn;
                        ret = ehca_update_busmap(start_pfn, nr_pages);
                        if (ret)
                                return ret;
                        /* Skip the hugepage */
                        pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
                        start_pfn = pfn;
                } else
                        pfn += (EHCA_SECTSIZE / PAGE_SIZE);
        }

        /* Add mem found behind the hugepage(s) */
        nr_pages = pfn - start_pfn;
        return ehca_update_busmap(start_pfn, nr_pages);
}

int ehca_create_busmap(void)
{
        int ret;

        ehca_mr_len = 0;
        ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                    ehca_create_busmap_callback);
        return ret;
}

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
                                   struct ehca_mr *e_mr,
                                   struct ehca_mr_pginfo *pginfo)
{
        int top;
        u64 hret = H_SUCCESS; /* also covers an entirely invalid bmap */
        u64 *kpage;

        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                return -ENOMEM;
        }
        for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
                if (!ehca_bmap_valid(ehca_bmap->top[top]))
                        continue;
                hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
                if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
                        break;
        }

        ehca_free_fw_ctrlblock(kpage);

        if (hret == H_SUCCESS)
                return 0; /* Everything is fine */
        else {
                ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
                         "h_ret=%lli e_mr=%p top=%x lkey=%x "
                         "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
                         e_mr->ib.ib_mr.lkey,
                         shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle);
                return ehca2ib_return_code(hret);
        }
}

static u64 ehca_map_vaddr(void *caddr)
{
        int top, dir, idx;
        unsigned long abs_addr, offset;
        u64 entry;

        if (!ehca_bmap)
                return EHCA_INVAL_ADDR;

        abs_addr = __pa(caddr);
        top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
        if (!ehca_bmap_valid(ehca_bmap->top[top]))
                return EHCA_INVAL_ADDR;

        dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
        if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
                return EHCA_INVAL_ADDR;

        idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

        entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
        if (ehca_bmap_valid(entry)) {
                offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
                return entry | offset;
        } else
                return EHCA_INVAL_ADDR;
}
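/*
 * Translation sketch for ehca_map_vaddr(): the kernel virtual address is
 * converted to a physical address, the enclosing EHCA_SECTSIZE section is
 * looked up in ehca_bmap, and the section's bus offset (assigned by
 * ehca_update_busmap() as a running ehca_mr_len) replaces the section
 * bits while the offset inside the section is kept:
 *
 *      bus_addr = ent[idx] | (caddr & (EHCA_SECTSIZE - 1));
 *
 * Unmapped addresses yield EHCA_INVAL_ADDR, which the DMA stubs below
 * report via ehca_dma_mapping_error().
 */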
static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return dma_addr == EHCA_INVAL_ADDR;
}

static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
                               size_t size, enum dma_data_direction direction)
{
        if (cpu_addr)
                return ehca_map_vaddr(cpu_addr);
        else
                return EHCA_INVAL_ADDR;
}

static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
                                  enum dma_data_direction direction)
{
        /* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
                             unsigned long offset, size_t size,
                             enum dma_data_direction direction)
{
        u64 addr;

        if (offset + size > PAGE_SIZE)
                return EHCA_INVAL_ADDR;

        addr = ehca_map_vaddr(page_address(page));
        if (!ehca_dma_mapping_error(dev, addr))
                addr += offset;

        return addr;
}

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
                                enum dma_data_direction direction)
{
        /* This is only a stub; nothing to be done here */
}

static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
                           int nents, enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                u64 addr;

                addr = ehca_map_vaddr(sg_virt(sg));
                if (ehca_dma_mapping_error(dev, addr))
                        return 0;

                sg->dma_address = addr;
                sg->dma_length = sg->length;
        }
        return nents;
}

static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
                              int nents, enum dma_data_direction direction)
{
        /* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
        return sg->dma_address;
}

static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
{
        return sg->length;
}

static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
                                         size_t size,
                                         enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
                                            size_t size,
                                            enum dma_data_direction dir)
{
        dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
                                     u64 *dma_handle, gfp_t flag)
{
        struct page *p;
        void *addr = NULL;
        u64 dma_addr;

        p = alloc_pages(flag, get_order(size));
        if (p) {
                addr = page_address(p);
                dma_addr = ehca_map_vaddr(addr);
                if (ehca_dma_mapping_error(dev, dma_addr)) {
                        free_pages((unsigned long)addr, get_order(size));
                        return NULL;
                }
                if (dma_handle)
                        *dma_handle = dma_addr;
                return addr;
        }
        return NULL;
}

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
                                   void *cpu_addr, u64 dma_handle)
{
        if (cpu_addr && size)
                free_pages((unsigned long)cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
        .mapping_error          = ehca_dma_mapping_error,
        .map_single             = ehca_dma_map_single,
        .unmap_single           = ehca_dma_unmap_single,
        .map_page               = ehca_dma_map_page,
        .unmap_page             = ehca_dma_unmap_page,
        .map_sg                 = ehca_dma_map_sg,
        .unmap_sg               = ehca_dma_unmap_sg,
        .dma_address            = ehca_dma_address,
        .dma_len                = ehca_dma_len,
        .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
        .sync_single_for_device = ehca_dma_sync_single_for_device,
        .alloc_coherent         = ehca_dma_alloc_coherent,
        .free_coherent          = ehca_dma_free_coherent,
};