iwch_qp.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/gfp.h>

#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"

#define NO_SUPPORT -1

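/*
 * Build a T3 SEND WQE from an ib_send_wr: map the IB opcode and
 * solicited-event flag onto the T3 rdmaop, copy the SGL, and return
 * the WQE size in 8-byte flits via *flit_cnt.
 */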
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	plen = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

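/*
 * Build a T3 RDMA WRITE WQE.  A write-with-immediate carries its four
 * bytes of immediate data in sgl[0] in place of a real SGL.
 */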
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
			    u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = cpu_to_be32(0);
		wqe->write.num_sgle = cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen)
				return -EMSGSIZE;
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

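/*
 * Build a T3 READ REQUEST WQE.  T3 supports at most one SGE per read;
 * local_inv is set for IB_WR_RDMA_READ_WITH_INV so the HW invalidates
 * the local STAG when the read completes.
 */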
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

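/*
 * Build a fast-register WQE.  A page list longer than
 * T3_MAX_FASTREG_FRAG spills into a second (PBL-fragment) WQE,
 * reported to the caller through *wr_cnt.
 */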
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
		cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift - 12) |
		V_FR_TYPE(TPT_VATO) |
		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr + 1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
				       Q_GENBIT(wq->wptr + 1, wq->size_log2),
				       0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
				       T3_EOP);
			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}

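/*
 * Build a local STAG-invalidate WQE.
 */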
static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
			  u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}

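/*
 * Validate each SGE against its MR (valid state, no zero-based VA,
 * within bounds, no address wrap) and compute the adapter's PBL index
 * and page size for each entry.
 */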
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;

	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
				rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

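/*
 * Build a RECV WQE for SGEs with non-zero STAGs.  The 'to' field
 * carries only the offset into the page; the HW gets the page frames
 * from the PBL entries referenced by pbl_addr[].
 */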
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
			   struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}

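/*
 * Build a RECV WQE for zero-STAG SGEs.  PBL memory is allocated here
 * and handed to the uP, which builds the PBL and the HW recv
 * descriptor; see the comments in the body.
 */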
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry.  So we allocate the max needed PBL memory here and
	 * pass it to the uP in the recv WR.  The uP will build the PBL and
	 * setup the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size.  This and an imposed 128MB
		 * sge length limit allows us to require only a 2-entry HW
		 * PBL for each SGE.  This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}

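/*
 * Post a chain of send work requests.  Builds one (or, for fastreg,
 * possibly two) WQEs per WR under the QP lock, then rings the
 * doorbell once after the lock is dropped.
 */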
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
					    &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err)
			break;
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

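/*
 * Post a chain of receive work requests.  Each WR becomes a single
 * RECV WQE, built by build_rdma_recv() or build_zero_stag_recv()
 * depending on the lkey of the first SGE.
 */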
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (wr->num_sge > T3_MAX_SGE) {
			err = -EINVAL;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs) {
			if (wr->sg_list[0].lkey)
				err = build_rdma_recv(qhp, wqe, wr);
			else
				err = build_zero_stag_recv(qhp, wqe, wr);
		} else
			err = -ENOMEM;
		if (err)
			break;

		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
		     "wqe %p\n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

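/*
 * Post a memory-window bind WQE.  The underlying MR is resolved via
 * iwch_sgl2pbl_map() to obtain the PBL address and page size for the
 * bind.
 */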
int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->bind_info.addr;
	sgl.lkey = mw_bind->bind_info.mr->lkey;
	sgl.length = mw_bind->bind_info.length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(
		mw_bind->bind_info.mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

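/*
 * Map a CQE error status onto the RDMAP/DDP/MPA layer and error code
 * carried in a TERMINATE message.  A NULL rsp_msg yields the local
 * catastrophic (internal error) encoding.
 */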
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

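/*
 * Post a 0B read request directly to the offload device (used for the
 * peer2peer RTR exchange).  The STAG and TO fields are dummies since
 * no data actually moves.
 */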
int iwch_post_zb_read(struct iwch_ep *ep)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid) |
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
		       struct iwch_cq *schp)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock(&qhp->lock);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock(&rchp->lock);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock(&rchp->lock);
	if (flushed) {
		spin_lock(&rchp->comp_handler_lock);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock(&rchp->comp_handler_lock);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock(&schp->lock);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock(&schp->lock);
	if (flushed) {
		spin_lock(&schp->comp_handler_lock);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock(&schp->comp_handler_lock);
	}

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock(&qhp->lock);
}

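/*
 * Flush the QP.  User QPs are just marked in error and their CQ
 * handlers notified; kernel QPs get the full HW CQ flush via
 * __flush_qp() above.
 */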
static void flush_qp(struct iwch_qp *qhp)
{
	struct iwch_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		cxio_set_wq_in_error(&qhp->wq);
		cxio_set_cq_in_error(&rchp->cq);
		spin_lock(&rchp->comp_handler_lock);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock(&rchp->comp_handler_lock);
		if (schp != rchp) {
			cxio_set_cq_in_error(&schp->cq);
			spin_lock(&schp->comp_handler_lock);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock(&schp->comp_handler_lock);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;

	while (count < USHRT_MAX &&
	       fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}

	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}

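/*
 * Fill a t3_rdma_init_attr from the QP/EP attributes and hand it to
 * the HAL to transition the HW QP into RDMA mode.
 */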
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;

	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

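/*
 * QP state machine.  Applies attribute changes (IDLE only), then
 * drives the IDLE/RTS/CLOSING/TERMINATE/ERROR transitions; deferred
 * work (terminate, disconnect, EP deref) runs after the lock is
 * dropped.
 */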
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);
		put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}