/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 1)
		return -EINVAL;
	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof *attr);
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = ~0ull;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = dev->attr.max_qp;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = 0;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = (dev->attr.max_qp - 1);
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->active_width = IB_WIDTH_1X;
	props->active_speed = 4;
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}
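
/*
 * Each ucontext keeps a list of the physical ranges it is allowed to
 * mmap() (doorbell pages, DPP pages, queue memory).  The helpers below
 * add, remove and look up entries so that ocrdma_mmap() can validate a
 * request before remapping it into user space.
 */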
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;
	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;
	resp.rsvd = 0;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct pci_dev *pdev = uctx->dev->nic_info.pdev;

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return 0;
}
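
/*
 * An offset handed to mmap() is routed to one of three regions: the
 * doorbell space, the DPP space, or coherent queue memory.  Doorbell
 * and DPP ranges live in device BAR space and use io_remap_pfn_range();
 * DPP pages are additionally mapped write-combined.  Only offsets
 * previously registered against this ucontext via ocrdma_add_mmap()
 * are accepted.
 */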
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = ucontext->dev;
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		/* doorbell mapping */
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		/* dpp area mapping */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		/* queue memory mapping */
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = pd->dev->nic_info.unmapped_db +
	    (pd->id * pd->dev->nic_info.db_page_size);
	db_page_size = pd->dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
		    (pd->id * OCRDMA_DPP_PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 OCRDMA_DPP_PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	if (pd->dpp_enabled)
		ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	int status;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->dev = dev;
	if (udata && context) {
		pd->dpp_enabled = (dev->nic_info.dev_family ==
				   OCRDMA_GEN2_FAMILY) ? true : false;
		pd->num_dpp_qp =
		    pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
	}
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		kfree(pd);
		return ERR_PTR(status);
	}

	if (udata && context) {
		status = ocrdma_copy_pd_uresp(pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	ocrdma_dealloc_pd(&pd->ibpd);
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;
	int status;
	u64 usr_db;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	if (pd->uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
		    (pd->id * OCRDMA_DPP_PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db,
					OCRDMA_DPP_PAGE_SIZE);
		usr_db = dev->nic_info.unmapped_db +
		    (pd->id * dev->nic_info.db_page_size);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
	}
	kfree(pd);
	return status;
}

static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
					   int acc, u32 num_pbls,
					   u32 addr_check)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s(%d) leaving err, invalid access rights\n",
		       __func__, dev->id);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);
	mr->hwmr.dev = dev;
	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
	if (status) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}
	mr->pd = pd;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return mr;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct ocrdma_mr *mr;

	mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
	if (IS_ERR(mr))
		return ERR_CAST(mr);

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}
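
/*
 * Choose a PBL (page buffer list) geometry for num_pbes page entries.
 * The loop tries power-of-two PBL sizes starting at
 * OCRDMA_MIN_HPAGE_SIZE and grows the PBL until the number of PBLs
 * needed to hold all PBEs (8 bytes each) fits within the device limit
 * max_num_mr_pbl.
 */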
static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
				mr->num_pbls, GFP_KERNEL);
	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}
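
/*
 * Walk the user memory region chunk by chunk and emit one 64-bit PBE
 * (split into pa_lo/pa_hi) per page into the PBLs allocated above,
 * advancing to the next PBL whenever the current one fills up.
 */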
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct ib_umem_chunk *chunk;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	list_for_each_entry(chunk, &umem->chunk_list, list) {
		/* get all the dma regions from the chunk. */
		for (i = 0; i < chunk->nmap; i++) {
			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
				/* store the page address in pbe */
				pbe->pa_lo =
				    cpu_to_le32(sg_dma_address
						(&chunk->page_list[i]) +
						(umem->page_size * pg_cnt));
				pbe->pa_hi =
				    cpu_to_le32(upper_32_bits
						((sg_dma_address
						  (&chunk->page_list[i]) +
						  umem->page_size * pg_cnt)));
				pbe_cnt += 1;
				total_num_pbes += 1;
				pbe++;

				/* if done building pbes, issue the mbx cmd. */
				if (total_num_pbes == num_pbes)
					return;

				/* if the given pbl is full storing the pbes,
				 * move to next pbl.
				 */
				if (pbe_cnt ==
					(mr->hwmr.pbl_size / sizeof(u64))) {
					pbl_tbl++;
					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
					pbe_cnt = 0;
				}
			}
		}
	}
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);
	dev = pd->dev;

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->hwmr.dev = dev;
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->pd = pd;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = mr->hwmr.dev;
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	if (mr->hwmr.fr_mr == 0)
		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx;
	struct ocrdma_create_cq_uresp uresp;

	uresp.cq_id = cq->id;
	uresp.page_size = cq->len;
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
	uresp.db_page_size = cq->dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, cq->dev->id, cq->id);
		goto err;
	}
	uctx = get_ocrdma_ucontext(ib_ctx);
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries,
			       int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else
		ureq.dpp_cq = 0;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->dev = dev;

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	cq->arm_needed = true;
	dev->cq_tbl[cq->id] = cq;

	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = cq->dev;

	status = ocrdma_mbx_destroy_cq(dev, cq);

	if (cq->ucontext) {
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
				dev->nic_info.db_page_size);
	}
	dev->cq_tbl[cq->id] = NULL;

	kfree(cq);
	return status;
}
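
/*
 * dev->qp_tbl maps a hardware QP id to its driver QP object so the
 * rest of the driver can look up the owning QP (for example, during
 * CQE and async event handling).
 */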
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type != IB_QPT_GSI &&
	    attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_UD) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err
		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		     __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}
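
/*
 * Fill the create-QP response for user space: queue ids, page
 * addresses/sizes for the SQ and RQ (unless an SRQ is attached), and
 * the doorbell page with SQ/RQ register offsets, which differ between
 * GEN2 and earlier adapter families.  Each page handed back is also
 * registered for mmap().
 */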
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = qp->sq.len;
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = qp->rq.len;
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
		uresp.db_shift = (qp->id < 128) ? 24 : 16;
	} else {
		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}
	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			((qp->id < 128) ?
			 OCRDMA_DB_GEN2_RQ1_OFFSET :
			 OCRDMA_DB_GEN2_RQ2_OFFSET);
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);
	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user-space QPs' wr_id tables are managed by the library */
	if (udata == NULL) {
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't need
	 * to know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = qp->dev;

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
	qp_attr->path_mtu =
	    ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				   OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				   OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
	       sizeof(params.dgid));
	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
	    OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
	    OCRDMA_QP_PARAMS_TCLASS_SHIFT;

	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	qp_attr->ah_attr.port_num = 1;
	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
			       OCRDMA_QP_PARAMS_SL_MASK) >>
	    OCRDMA_QP_PARAMS_SL_SHIFT;
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
	    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
	    OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	qp_attr->ah_attr.src_path_bits = 0;
	qp_attr->ah_attr.static_rate = 0;
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
	    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
mbx_err:
	return status;
}
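
/*
 * Small helpers for the circular hardware queues: SRQ index bitmap
 * toggling, free-slot accounting from head/tail, and head/tail
 * advancement with wrap-around via max_wqe_idx.
 */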
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
{
	int i = idx / 32;
	unsigned int mask = (1 << (idx % 32));

	if (srq->idx_bit_fields[i] & mask)
		srq->idx_bit_fields[i] &= ~mask;
	else
		srq->idx_bit_fields[i] |= mask;
}

static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	int free_cnt;

	if (q->head >= q->tail)
		free_cnt = (q->max_cnt - q->head) + q->tail;
	else
		free_cnt = q->tail - q->head;
	return free_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head &&
		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head) ? 1 : 0;
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}

/* discard the cqe for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqe.
	 */
	cur_getp = cq->getp;
	/* find up to where we reap the cq. */
	stop_getp = cur_getp;
	do {
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		/* if (a) done reaping the whole hw cq, or
		 * (b) qp_xq becomes empty,
		 * then exit.
		 */
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if a previously discarded cqe is found, skip that too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
		if (is_cqe_for_sq(cqe))
			ocrdma_hwq_inc_tail(&qp->sq);
		else {
			if (qp->srq) {
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
				spin_unlock_irqrestore(&qp->srq->q_lock,
						       flags);
			} else
				ocrdma_hwq_inc_tail(&qp->rq);
		}
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}

static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = qp->dev;

	/* sync with any active CQ poll */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}

int ocrdma_destroy_qp(struct ib_qp *ibqp)
{
	int status;
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask = IB_QP_STATE;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	attrs.qp_state = IB_QPS_ERR;
	pd = qp->pd;

	/* change the QP state to ERROR */
	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as one just destroyed) don't get discarded until the old QP's
	 * CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_lock(&qp->rq_cq->cq_lock);

	ocrdma_del_qpn_map(dev, qp);

	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
		spin_unlock(&qp->rq_cq->cq_lock);
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return status;
}

static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = srq->rq.pa;
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
	    (srq->pd->id * srq->dev->nic_info.db_page_size);
	uresp.db_page_size = srq->dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	return status;
}
struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
				 struct ib_srq_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = pd->dev;
	struct ocrdma_srq *srq;

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return ERR_PTR(-EINVAL);
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return ERR_PTR(-EINVAL);

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(status);

	spin_lock_init(&srq->q_lock);
	srq->dev = dev;
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(srq, init_attr, pd);
	if (status)
		goto err;

	if (udata == NULL) {
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (srq->rqe_wr_id_tbl == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = DIV_ROUND_UP(srq->rq.max_cnt, 32);
		srq->idx_bit_fields =
		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
		if (srq->idx_bit_fields == NULL) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}

	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(srq, udata);
		if (status)
			goto arm_err;
	}

	return &srq->ibsrq;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
err:
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	kfree(srq);
	return ERR_PTR(status);
}
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}
int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
	int status;
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev;

	srq = get_ocrdma_srq(ibsrq);
	dev = srq->dev;

	status = ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq);
	return status;
}
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);

	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = wr->wr.ud.remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
}
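/* Translate an ib_sge list into hardware SGEs and accumulate the WQE
 * payload length; a zeroed SGE is written when the list is empty.
 */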
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}
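/* Encode either an inline payload (copied directly into the WQE) or a
 * regular SGE list, and stamp the resulting WQE size into the control
 * word. Inline sends are limited to qp->max_inline_data bytes.
 */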
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    struct ib_send_wr *wr, u32 wqe_size)
{
	if (wr->send_flags & IB_SEND_INLINE) {
		if (wr->sg_list[0].length > qp->max_inline_data) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, wr->sg_list[0].length);
			return -EINVAL;
		}
		memcpy(sge,
		       (void *)(unsigned long)wr->sg_list[0].addr,
		       wr->sg_list[0].length);
		hdr->total_len = wr->sg_list[0].length;
		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
	    sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
	ext_rw->lrkey = wr->wr.rdma.rkey;
	ext_rw->len = hdr->total_len;
}
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << 16);

	iowrite32(val, qp->sq_db);
}
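/*
 * Post a chain of send WRs. Runs under q_lock; each WQE is built in
 * the next SQ slot, byte-swapped to LE, made visible with wmb(), and
 * announced to the adapter by ringing the SQ doorbell once per WQE.
 */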
int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			/* fall through */
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
				    OCRDMA_WQE_STRIDE) <<
				    OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
					 OCRDMA_WQE_SIZE_MASK) *
				   OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));

	iowrite32(val, qp->rq_db);
}
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
			     u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
		   OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}
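/*
 * Post a chain of receive WRs on a non-SRQ QP. Each RQE is built in
 * the next RQ slot, its wr_id recorded in the shadow table, and the RQ
 * doorbell rung once per RQE.
 */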
int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		     struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);
		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
/* CQEs for an SRQ's RQEs can potentially arrive out of order.
 * ocrdma_srq_get_idx() returns a free entry in the shadow table in
 * which to store the wr_id; the tag/index is returned in the cqe to
 * reference back to the rqe it belongs to.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}
	BUG_ON(row == srq->bit_fields_len);
	return indx;
}
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
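/*
 * Post a chain of receive WRs on an SRQ. Unlike the per-QP path, each
 * RQE is tagged with a shadow-table index so that its wr_id can be
 * recovered from out-of-order CQEs.
 */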
int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}
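/* Map adapter CQE error codes onto their ib_wc_status equivalents;
 * anything unrecognized is reported as IB_WC_GENERAL_ERR.
 */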
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;

	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;

	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, opcode);
		break;
	}
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
				 OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
					 OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}
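/* Common error-completion handling: report the mapped ib_wc_status,
 * push the QP onto the flush list and into the ERROR state, and decide
 * whether the same hardware CQE must be "expanded" into further flush
 * completions for still-pending WQEs/RQEs.
 */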
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);

	/* if a wqe/rqe is still pending for which a cqe needs to be
	 * returned, trigger expanding this cqe into flush cqes.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}
static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				   struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				   struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	/* when the hw sq is empty, but the rq is not empty, we continue
	 * to keep the cqe in order to get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when the cq for rq and sq is the same, it is safe to
		 * return flush cqes for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqes as this cqe is used
			 * for triggering the cq event on the buddy cq of
			 * the RQ. When the QP is destroyed, this cqe will
			 * be removed from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}
static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;	/* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}

	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
	if (tail != wqe_idx)
		expand = true;	/* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}
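/* Poll one send CQE: successful completions are consumed directly,
 * error completions go through the flush/expand machinery above.
 */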
static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}
static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
{
	int status;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
		OCRDMA_CQE_PKEY_MASK;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT);
	return status;
}
static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;

	/* when the hw rq is empty, but the sq is not empty, we continue
	 * to keep the cqe to get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_UD_STATUS_MASK) >>
			  OCRDMA_CQE_UD_STATUS_SHIFT;
	else
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			  OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}
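/*
 * Walk the hardware CQ from the current get pointer, translating valid
 * CQEs into work completions until num_entries are filled, a coalesced
 * CQE must be expanded, or a buddy-cq CQE forces a stop. The CQ
 * doorbell is rung once at the end to credit all consumed hardware
 * CQEs.
 */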
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = cq->dev;
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;
	if (polled_hw_cqes || expand || stop) {
		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
				  polled_hw_cqes);
	}
	return i;
}
/* insert error cqes if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}
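/*
 * ib_poll_cq entry point: drain hardware CQEs first, then synthesize
 * flush completions for QPs on this CQ's flush list if room remains.
 */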
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = NULL;
	unsigned long flags;
	struct ocrdma_dev *dev;
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;

	cq = get_ocrdma_cq(ibcq);
	dev = cq->dev;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* the adapter returns a single error cqe when a qp moves
		 * to the error state. So insert error cqes with wc_status
		 * FLUSHED for the pending WQEs and RQEs of the QP's SQ and
		 * RQ, respectively, that use this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}
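/*
 * ib_req_notify_cq entry point: record the requested notification type
 * and ring the CQ doorbell to arm it, deferring the arm while valid,
 * unconsumed CQEs remain so that no spurious interrupt is raised.
 */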
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq;
	unsigned long flags;
	struct ocrdma_dev *dev;
	u16 cq_id;
	u16 cur_getp;
	struct ocrdma_cqe *cqe;

	cq = get_ocrdma_cq(ibcq);
	cq_id = cq->id;
	dev = cq->dev;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		cq->armed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		cq->solicited = true;

	cur_getp = cq->getp;
	cqe = cq->va + cur_getp;

	/* check whether any valid cqe exists or not. If not, then it is
	 * safe to arm. If a cqe is not yet consumed, let it be consumed
	 * first and arm afterwards, to avoid false interrupts.
	 */
	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
		cq->arm_needed = false;
		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return 0;
}