qp.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 2000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
		 "db flow control mode (default = 2000)");

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !t4_ocqp_supported())
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
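
/*
 * Allocate the SQ and RQ for a new QP and post a FW_RI_RES_WR so the
 * firmware writes both egress queue contexts.  "uctx" distinguishes user
 * QPs (queues mmapped into the process) from kernel QPs, which also get
 * software shadow rings (sw_sq/sw_rq).
 */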
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}
	if (user) {
		/*
		 * Prefer an on-chip SQ; if that is unsupported or the pool
		 * is exhausted, fall back to a host-memory SQ.
		 */
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			ret = alloc_host_sq(rdev, &wq->sq);
	} else {
		ret = alloc_host_sq(rdev, &wq->sq);
	}
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
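
/*
 * Copy the SGEs of an inline work request into the WQE itself as immediate
 * data, wrapping around the end of the SQ ring as needed.  Returns
 * -EMSGSIZE if the payload exceeds "max" bytes.
 */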
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
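
/*
 * Build a firmware ISGL (scatter/gather list) from an ib_sge array,
 * wrapping flits around the end of the queue, and optionally return the
 * total payload length via *plenp.
 */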
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
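
/*
 * WQE builders for the various opcodes.  Each fills in the opcode-specific
 * part of the WQE from the ib_send_wr/ib_recv_wr and returns the WQE
 * length in 16-byte units via *len16.
 */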
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
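
/*
 * Build a fast-register (FR_NSMR) WQE: program the TPT fields and copy the
 * physical buffer list into the WQE as immediate data, zero-padding the
 * remainder.
 */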
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}
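
/*
 * Post a chain of send work requests.  Under the QP lock, each ib_send_wr
 * is translated into a firmware WQE at the SQ producer index and its
 * software SQ entry is recorded for completion handling; the SQ doorbell
 * is rung once for the whole chain unless doorbells are disabled for flow
 * control.
 */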
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
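
/*
 * Post a chain of receive work requests.  Mirrors c4iw_post_send(): each
 * ib_recv_wr becomes an ISGL-based RQ WQE, the wr_id is stashed in the
 * software RQ, and the RQ doorbell is rung once at the end.
 */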
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}
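
/*
 * Map a hardware CQE error status to the iWARP TERMINATE layer/etype and
 * error code that should be reported to the peer.
 */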
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
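
/*
 * Send a TERMINATE message to the peer via a FW_RI_INIT_WR.  The layer,
 * etype and error code come either from the QP attributes (for MPA-level
 * errors set by the CM) or from the failing CQE.
 */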
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
					schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}
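
/*
 * Post a FW_RI_TYPE_FINI work request and wait for the firmware reply,
 * taking the connection out of RDMA mode.
 */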
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
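
/*
 * Post a FW_RI_TYPE_INIT work request that moves the connection into RDMA
 * mode, passing the negotiated MPA attributes, QP capabilities, queue IDs
 * and sequence numbers to the firmware, then wait for its reply.
 */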
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);

	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
	do {
		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	} while (1);
	mutex_unlock(&qhp->rhp->db_mutex);
	return 0;
}
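
/*
 * The QP state machine.  Applies attribute changes and drives the
 * IDLE/RTS/CLOSING/TERMINATE/ERROR transitions, issuing rdma_init(),
 * rdma_fini(), flushes, TERMINATE messages and EP disconnects as each
 * transition requires.  "internal" marks transitions initiated by the
 * driver itself rather than by the verbs consumer.
 */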
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	rhp->qpcnt--;
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
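
/*
 * Verbs QP creation.  Sizes the SQ and RQ, creates the hardware queues via
 * create_qp(), registers the QP in the device IDR (entering doorbell flow
 * control if the QP count crosses db_fc_threshold), and, for user QPs,
 * hands the queue and doorbell mmap keys back through ib_copy_to_udata().
 */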
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	return 0;
}