ehca_mrmw.c

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_umem.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + (chunk_size - 1)) / (chunk_size))

/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

/* DMEM toleration management */
#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
#define EHCA_HUGEPAGESHIFT     34
#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
#define EHCA_DIR_MAP_SIZE (0x10000)
#define EHCA_ENT_MAP_SIZE (0x10000)
#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
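
/*
 * Sketch of the 3-level toleration-map addressing implied by the shifts
 * above (an assumption based on these defines; the lookup helpers live
 * elsewhere in the driver). A 64-bit section offset splits into three
 * 13-bit indices:
 *
 *	top = (offset >> EHCA_TOP_INDEX_SHIFT) & EHCA_INDEX_MASK;
 *	dir = (offset >> EHCA_DIR_INDEX_SHIFT) & EHCA_INDEX_MASK;
 *	ent = offset & EHCA_INDEX_MASK;
 *
 * so each level resolves 8k entries within a 64k block.
 */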
static unsigned long ehca_mr_len;

/*
 * Memory map data structures
 */
struct ehca_dir_bmap {
	u64 ent[EHCA_MAP_ENTRIES];
};
struct ehca_top_bmap {
	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
};
struct ehca_bmap {
	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
};

static struct ehca_bmap *ehca_bmap;

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
	EHCA_MR_PGSIZE4K  = 0x1000L,
	EHCA_MR_PGSIZE64K = 0x10000L,
	EHCA_MR_PGSIZE1M  = 0x100000L,
	EHCA_MR_PGSIZE16M = 0x1000000L
};

#define EHCA_MR_PGSHIFT4K  12
#define EHCA_MR_PGSHIFT64K 16
#define EHCA_MR_PGSHIFT1M  20
#define EHCA_MR_PGSHIFT16M 24
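
/*
 * The four page-size ladder values above map onto the 2-bit encoding the
 * firmware expects, computed by ehca_encode_hwpage_size() below as
 * (log2(pgsize) - 12) / 4, i.e. 4K -> 0, 64K -> 1, 1M -> 2, 16M -> 3.
 */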
static u64 ehca_map_vaddr(void *caddr);

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
	int log = ilog2(pgsize);
	WARN_ON(log < 12 || log > 24 || log & 3);
	return (log - 12) / 4;
}

static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
}

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/
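
/*
 * ehca_get_dma_mr() satisfies a DMA MR request via the driver's internal
 * max-MR (shca->maxmr): it registers a new MR starting at the mapped
 * KERNELBASE address through ehca_reg_maxmr() (registration details live
 * there); without the internal max-MR the call fails with -EINVAL.
 */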
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr,
				     (void *)ehca_map_vaddr((void *)KERNELBASE),
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/
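
/*
 * ehca_reg_phys_mr() registers a caller-supplied physical buffer list.
 * Page counts are computed with NUM_CHUNKS(), which rounds up and also
 * accounts for the offset of iova_start within its first page; e.g.
 * with (assumed) iova_start offset 0x234 into a 4K page and size
 * 0x10000, NUM_CHUNKS(0x234 + 0x10000, 0x1000) = 0x11 = 17 kpages.
 */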
struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;
		u64 hw_pgsize;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		/* for kernel space we try the largest possible pgsize */
		hw_pgsize = ehca_get_max_hwpage_size(shca);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
					 hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/
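
/*
 * ehca_reg_user_mr() pins a userspace region via ib_umem_get() and picks
 * a hw page size for it. For hugetlb-backed regions the heuristic
 * (fls64(length - 1) + 3) & ~3 rounds the region's order up to the next
 * multiple of 4 bits, i.e. onto the 4K/64K/1M/16M ladder; e.g. an
 * (assumed) 300 KB region gives fls64 = 19, rounded to page_shift = 20
 * (1M hw pages). If the HCA rejects a large page size with -EINVAL
 * (kpages not contiguous within a hw page), registration is retried
 * once with PAGE_SIZE via the reg_user_mr_fallback label.
 */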
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int mr_access_flags,
			       struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret, page_shift;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hwpage_size;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%llx "
			 "virt_base=%llx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags, 0);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *)e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	/* select proper hw_pgsize */
	page_shift = PAGE_SHIFT;
	if (e_mr->umem->hugetlb) {
		/* determine page_shift, clamp between 4K and 16M */
		page_shift = (fls64(length - 1) + 3) & ~3;
		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
				 EHCA_MR_PGSHIFT16M);
	}
	hwpage_size = 1UL << page_shift;

	/* now that we have the desired page size, shift until it's
	 * supported, too. 4K is always supported, so this terminates.
	 */
	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
		hwpage_size >>= 4;

reg_user_mr_fallback:
	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.hwpage_size = hwpage_size;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
		ehca_warn(pd->device, "failed to register mr "
			  "with hwpage_size=%llx", hwpage_size);
		ehca_info(pd->device, "try to register mr with "
			  "kpage_size=%lx", PAGE_SIZE);
		/*
		 * this means kpages are not contiguous for a hw page
		 * try kernel page size as fallback solution
		 */
		hwpage_size = PAGE_SIZE;
		goto reg_user_mr_fallback;
	}

	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;
	new_size = e_mr->size;
	new_acl = e_mr->acl;
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%llx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
					 new_size, hw_pgsize);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.hwpage_size = hw_pgsize;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage =
			((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout;

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/
int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout;

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
			 "shca=%p hca_hndl=%llx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/
int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	u64 hw_pgsize;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	hw_pgsize = 1 << fmr_attr->page_shift;
	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.hwpage_size = hw_pgsize;
	/*
	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
	 * but deferred to map_phys_fmr()
	 */
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->hwpage_size = hw_pgsize;
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/
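
/*
 * ehca_map_phys_fmr() maps a page list into an allocated FMR by
 * reregistering the underlying MR. num_hwpages scales the caller's page
 * count onto hw pages (list_len * fmr_page_size / hwpage_size), and
 * next_hwpage locates the first hw page from the iova's offset within
 * its FMR page. iova itself must be FMR-page aligned.
 */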
int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.hwpage_size = e_fmr->hwpage_size;
	pginfo.num_hwpages =
		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage =
		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
			 "iova=%llx", ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/
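
/*
 * ehca_unmap_fmr() works in two passes: the first walks fmr_list only to
 * validate that every entry is a real FMR and that all of them belong to
 * the same SHCA; the second actually unmaps them and stops at the first
 * failure, so a partial unmap is reported rather than silently skipped.
 */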
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check that all FMRs belong to the same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo);
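
/*
 * ehca_reg_mr() is the common registration path: it first obtains an MR
 * resource from firmware via hipz_h_alloc_resource_mr(), then registers
 * the backing pages, dispatching on reg_type between the bus-map path
 * (EHCA_REG_BUSMAP_MR) and the normal rpage path (EHCA_REG_MR). If page
 * registration fails, the freshly allocated MR resource is freed again.
 */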
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey, /*OUT*/
		enum ehca_reg_type reg_type)
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
	if (ehca_use_hp_mr == 1)
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	if (reg_type == EHCA_REG_BUSMAP_MR)
		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
	else if (reg_type == EHCA_REG_MR)
		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	else
		ret = -EINVAL;

	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->hwpage_size = pginfo->hwpage_size;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/
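
/*
 * ehca_reg_mr_rpages() feeds page addresses to firmware in batches of at
 * most MAX_RPAGES (512). The hcall protocol: every non-final batch must
 * return H_PAGE_REGISTERED, and only the final batch must return
 * H_SUCCESS ("registration complete"). A single-page batch passes the
 * page address by value; larger batches pass the absolute address of the
 * kpage buffer holding the list.
 */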
int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	if (!pginfo->num_hwpages) /* in case of fmr */
		return 0;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max MAX_RPAGES ehca mr pages per register call */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;      /* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%i rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(
			shca->ipz_hca_handle, e_mr,
			ehca_encode_hwpage_size(pginfo->hwpage_size),
			0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lli "
					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
				 "mr_hndl=%llx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/
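
/*
 * ehca_rereg_mr_rereg1() is the fast path: a single hipz_h_reregister_pmr
 * hcall that swaps address/size/ACL/PD in one shot. It returns -EAGAIN
 * (after restoring *pginfo from pginfo_save) when firmware refuses, e.g.
 * on H_MR_CONDITION (MW bound or shared MR), signalling the caller to
 * fall back to the 3-hcall path in ehca_rereg_mr().
 */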
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->hwpage_size = pginfo->hwpage_size;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if (ret && (ret != -EAGAIN))
		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/
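
/*
 * ehca_rereg_mr() chooses between the two reregistration strategies:
 * Rereg1 (one hcall) is only usable when both old and new page counts
 * fit in a single rpage batch (<= MAX_RPAGES) and the MR does not grow;
 * otherwise, or for a (supposedly impossible) max-MR rereg, it uses
 * Rereg3: free the old MR, reset the ehca_mr bookkeeping, and register
 * from scratch. On failure the saved MR fields are copied back.
 */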
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->hwpage_size = save_mr.hwpage_size;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/
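
/*
 * ehca_unmap_one_fmr() first tries the cheap unmap: for FMRs small
 * enough for Rereg1 (fmr_max_pages <= MAX_RPAGES) a reregister hcall
 * with len=0 detaches all pages; the next map must then use the rereg
 * hcall again to register pages. If that fails, or the FMR is larger,
 * the MR is freed and re-created empty via ehca_reg_mr().
 */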
int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
			 "mr_hndl=%llx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->hwpage_size = save_fmr.hwpage_size;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey, EHCA_REG_MR);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		/* restore from the copy actually taken above */
		memcpy(&e_fmr->flags, &(save_fmr.flags),
		       sizeof(struct ehca_mr) - offset);
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/
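
/*
 * Register a shared MR: the new MR shares the hardware page tables of
 * e_origmr via the hipz_h_register_smr hcall, so only the new iova_start
 * and access control are supplied; page counts, size and hwpage size are
 * inherited from the original MR on success.
 */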
int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/
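
/*
 * Compute the kernel-virtual base address of a memory section from its
 * position in the three-level bus map: the (top, dir, idx) indices are
 * packed back into a section number (idx in the low bits, dir and top
 * shifted up by EHCA_DIR_INDEX_SHIFT/EHCA_TOP_INDEX_SHIFT), which is then
 * scaled by the section size and translated with abs_to_virt().
 */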
static inline void *ehca_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;
	ret |= dir << EHCA_DIR_INDEX_SHIFT;
	ret |= top << EHCA_TOP_INDEX_SHIFT;
	return abs_to_virt(ret << SECTION_SIZE_BITS);
}

#define ehca_bmap_valid(entry) \
	((u64)entry != (u64)EHCA_INVAL_ADDR)

static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
			       struct ehca_shca *shca, struct ehca_mr *mr,
			       struct ehca_mr_pginfo *pginfo)
{
	u64 h_ret = 0;
	unsigned long page = 0;
	u64 rpage = virt_to_abs(kpage);
	int page_count;
	void *sectbase = ehca_calc_sectbase(top, dir, idx);

	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
		ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
			 "hwpage_size does not fit to "
			 "section start address");
	}
	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;

	while (page < page_count) {
		u64 rnum;
		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
		     rnum++) {
			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
			kpage[rnum] = virt_to_abs(pg);
		}

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
				ehca_encode_hwpage_size(pginfo->hwpage_size),
				0, rpage, rnum);

		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
			ehca_err(&shca->ib_device, "register_rpage_mr failed");
			return h_ret;
		}
	}
	return h_ret;
}
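
/*
 * The next two helpers walk the bus map top-down: for every valid entry
 * in a dir block all sections are registered, and for every valid dir in
 * a top block the section walk is invoked. H_PAGE_REGISTERED is treated
 * like success so the walk continues until all rpages are registered.
 */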
static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
				struct ehca_shca *shca, struct ehca_mr *mr,
				struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
			continue;

		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
					   pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
				    struct ehca_mr *mr,
				    struct ehca_mr_pginfo *pginfo)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
			continue;

		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;
	u64 hw_pgsize;

	if (!ehca_bmap) {
		ret = -EFAULT;
		goto ehca_reg_internal_maxmr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = ehca_mr_len;
	iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	hw_pgsize = ehca_get_max_hwpage_size(shca);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
				 hw_pgsize);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.hwpage_size = hw_pgsize;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout;

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->hwpage_size = e_origmr->hwpage_size;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/
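
/*
 * Deregister the internal max-MR: the SHCA's maxmr reference is cleared
 * before ehca_dereg_mr() runs (so the MR is no longer treated as max-MR)
 * and restored if deregistration fails, keeping the SHCA state consistent.
 */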
int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%i e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check the physical buffer array of MR verbs for validity and
 * calculate the MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%llx pbuf->size=%llx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
				     "pbuf->size=%llx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/
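
/*
 * Every address in the FMR page list must be a multiple of the FMR page
 * size; e.g. with a 4 KB fmr_page_size, 0x10000000 passes while
 * 0x10000800 is rejected with -EINVAL (addresses illustrative only).
 */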
/* check the page list of the map FMR verb for validity */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/
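
/*
 * For user memory with PAGE_SIZE >= hwpage_size, each kernel page is cut
 * into hwpages_per_kpage hardware pages; next_hwpage tracks the position
 * inside the current kernel page, while next_nmap/next_chunk preserve the
 * position in the umem chunk list across calls.
 */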
/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;
	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      pginfo->hwpage_size));
			if ( !(*kpage) ) {
				ehca_gen_err("pgaddr=%llx "
					     "chunk->page_list[i]=%llx "
					     "i=%x next_hwpage=%llx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

/*
 * check the given pages for a contiguous layout; the last page address
 * is returned in prev_pgaddr for further checks
 */
static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
				     int start_idx, int end_idx,
				     u64 *prev_pgaddr)
{
	int t;
	for (t = start_idx; t <= end_idx; t++) {
		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
		if (ehca_debug_level >= 3)
			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
			ehca_gen_err("uncontiguous page found pgaddr=%llx "
				     "prev_pgaddr=%llx page_list_i=%x",
				     pgaddr, *prev_pgaddr, t);
			return -EINVAL;
		}
		*prev_pgaddr = pgaddr;
	}
	return 0;
}
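
/*
 * For user memory with PAGE_SIZE < hwpage_size, kpages_per_hwpage kernel
 * pages are collapsed into one hardware page. The first kernel page of
 * each hwpage must be hwpage-aligned (the very first MR page is exempt,
 * see below), and ehca_check_kpages_per_ate() verifies that the kernel
 * pages backing one hwpage are physically contiguous.
 */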
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = (page_to_pfn(sg_page(&chunk->page_list[i]))
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr);
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%llx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%llx i=%x "
							"mr_pgsize=%llx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				if (ehca_debug_level >= 3) {
					u64 val = *(u64 *)abs_to_virt(
						phys_to_abs(pgaddr));
					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
						     "value=%016llx",
						     *kpage, pgaddr, val);
				}
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret)
					return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}

			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret)
				return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}
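
/*
 * Fill kpage entries from a physical buffer array: each ib_phys_buf is
 * split into num_hw hardware pages counted from its hwpage-aligned base,
 * next_buf/next_hwpage preserve the walk position between calls, and a
 * sanity check guards against overrunning the precomputed kpage/hwpage
 * totals.
 */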
static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
				 u32 number, u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf   = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw  = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				     pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%llx num_kpages=%llx "
					     "hwpage_cnt=%llx "
					     "num_hwpages=%llx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if ( !(*kpage) && pbuf->addr ) {
				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
					     "next_hwpage=%llx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number)
				break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}
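
/*
 * Fill kpage entries from an FMR page list. Two cases: if fmr_pgsize >=
 * hwpage_size, one list element yields several hwpages (next_hwpage steps
 * through them); otherwise several list elements are merged into one
 * hwpage, which requires the listed pages to be physically contiguous.
 */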
static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
				u32 number, u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage * pginfo->hwpage_size);
		if ( !(*kpage) ) {
			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
				     "next_listelem=%llx next_hwpage=%llx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if adrs are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("uncontiguous fmr pages "
						     "found prev=%llx p=%llx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/*
 * check whether an MR is a max-MR, i.e. one spanning all of memory;
 * returns 1 if it is a max-MR, 0 otherwise
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as a max-MR only if it matches the following: */
	if ((size == ehca_mr_len) &&
	    (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}
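
/*
 * The bus map (ehca_bmap) is a three-level radix tree over memory
 * sections: top blocks point to dir blocks, which hold per-section bus
 * offsets. Blocks are allocated lazily and pre-filled with 0xFF bytes so
 * untouched entries compare equal to EHCA_INVAL_ADDR (see
 * ehca_bmap_valid()).
 */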
static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
				     int dir)
{
	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
		ehca_top_bmap->dir[dir] =
			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
		if (!ehca_top_bmap->dir[dir])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
	}
	return 0;
}

static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
{
	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
		ehca_bmap->top[top] =
			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
		if (!ehca_bmap->top[top])
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
	}
	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
}

static inline int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}

void ehca_destroy_busmap(void)
{
	int top, dir;

	if (!ehca_bmap)
		return;

	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
				continue;

			kfree(ehca_bmap->top[top]->dir[dir]);
		}

		kfree(ehca_bmap->top[top]);
	}

	kfree(ehca_bmap);
	ehca_bmap = NULL;
}

static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i, start_section, end_section;
	int top, dir, idx;

	if (!nr_pages)
		return 0;

	if (!ehca_bmap) {
		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
		if (!ehca_bmap)
			return -ENOMEM;
		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
	}

	start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
	end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
	for (i = start_section; i < end_section; i++) {
		int ret;
		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
		idx = i & EHCA_INDEX_MASK;

		ret = ehca_init_bmap(ehca_bmap, top, dir);
		if (ret) {
			ehca_destroy_busmap();
			return ret;
		}
		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
		ehca_mr_len += EHCA_SECTSIZE;
	}
	return 0;
}
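
/*
 * A pfn starts a hugepage iff it is aligned to the hugepage boundary
 * (no bits set in EHCA_HUGEPAGE_PFN_MASK) and the compound page order of
 * its struct page matches EHCA_HUGEPAGESHIFT; ordinary memory returns 0.
 */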
static int ehca_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehca_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
		return ehca_update_busmap(initial_pfn, total_nr_pages);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehca_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehca_update_busmap(start_pfn, nr_pages);
			if (ret)
				return ret;
			/* Skip the hugepage */
			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else
			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehca_update_busmap(start_pfn, nr_pages);
}

int ehca_create_busmap(void)
{
	int ret;

	ehca_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehca_create_busmap_callback);
	return ret;
}

static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
				   struct ehca_mr *e_mr,
				   struct ehca_mr_pginfo *pginfo)
{
	int top;
	/* start from H_SUCCESS so an all-invalid bus map is not an error */
	u64 hret = H_SUCCESS;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		return -ENOMEM;
	}
	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
		if (!ehca_bmap_valid(ehca_bmap->top[top]))
			continue;
		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	ehca_free_fw_ctrlblock(kpage);

	if (hret == H_SUCCESS)
		return 0; /* Everything is fine */
	else {
		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
			 e_mr->ib.ib_mr.lkey,
			 shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle);
		return ehca2ib_return_code(hret);
	}
}
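
/*
 * Translate a kernel-virtual address to a bus address by walking the bus
 * map: the absolute address is split into top/dir/idx indices, the
 * section's bus offset is looked up, and the in-section offset is OR'ed
 * in; EHCA_INVAL_ADDR is returned for unmapped memory.
 */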
static u64 ehca_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long abs_addr, offset;
	u64 entry;

	if (!ehca_bmap)
		return EHCA_INVAL_ADDR;

	abs_addr = virt_to_abs(caddr);
	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]))
		return EHCA_INVAL_ADDR;

	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
		return EHCA_INVAL_ADDR;

	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
	if (ehca_bmap_valid(entry)) {
		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
		return entry | offset;
	} else
		return EHCA_INVAL_ADDR;
}
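
/*
 * ib_dma_* backend: eHCA addresses memory through the bus map rather
 * than a platform IOMMU, so mapping is a table lookup via
 * ehca_map_vaddr(), the unmap operations are no-ops, EHCA_INVAL_ADDR
 * doubles as the mapping-error cookie, and the sync operations fall
 * through to the generic dma_sync_single_for_*() helpers.
 */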
static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == EHCA_INVAL_ADDR;
}

static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
			       size_t size, enum dma_data_direction direction)
{
	if (cpu_addr)
		return ehca_map_vaddr(cpu_addr);
	else
		return EHCA_INVAL_ADDR;
}

static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				  enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	u64 addr;

	if (offset + size > PAGE_SIZE)
		return EHCA_INVAL_ADDR;

	addr = ehca_map_vaddr(page_address(page));
	if (!ehca_dma_mapping_error(dev, addr))
		addr += offset;

	return addr;
}

static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		u64 addr;
		addr = ehca_map_vaddr(sg_virt(sg));
		if (ehca_dma_mapping_error(dev, addr))
			return 0;

		sg->dma_address = addr;
		sg->dma_length = sg->length;
	}
	return nents;
}

static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
			      int nents, enum dma_data_direction direction)
{
	/* This is only a stub; nothing to be done here */
}

static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->dma_address;
}

static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
{
	return sg->length;
}

static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
					 size_t size,
					 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
					    size_t size,
					    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
				     u64 *dma_handle, gfp_t flag)
{
	struct page *p;
	void *addr = NULL;
	u64 dma_addr;

	p = alloc_pages(flag, get_order(size));
	if (p) {
		addr = page_address(p);
		dma_addr = ehca_map_vaddr(addr);
		if (ehca_dma_mapping_error(dev, dma_addr)) {
			free_pages((unsigned long)addr, get_order(size));
			return NULL;
		}
		if (dma_handle)
			*dma_handle = dma_addr;
		return addr;
	}
	return NULL;
}

static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
				   void *cpu_addr, u64 dma_handle)
{
	if (cpu_addr && size)
		free_pages((unsigned long)cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
	.mapping_error          = ehca_dma_mapping_error,
	.map_single             = ehca_dma_map_single,
	.unmap_single           = ehca_dma_unmap_single,
	.map_page               = ehca_dma_map_page,
	.unmap_page             = ehca_dma_unmap_page,
	.map_sg                 = ehca_dma_map_sg,
	.unmap_sg               = ehca_dma_unmap_sg,
	.dma_address            = ehca_dma_address,
	.dma_len                = ehca_dma_len,
	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
	.sync_single_for_device = ehca_dma_sync_single_for_device,
	.alloc_coherent         = ehca_dma_alloc_coherent,
	.free_coherent          = ehca_dma_free_coherent,
};