iwch_qp.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"

#define NO_SUPPORT -1

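/*
 * Build a T3 send WQE from an ib_send_wr.  Maps IB_WR_SEND and
 * IB_WR_SEND_WITH_INV (with or without IB_SEND_SOLICITED) onto the
 * corresponding T3 rdmaop, copies the SGL, and computes the total
 * payload length, failing with -EMSGSIZE on 32-bit overflow.
 */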
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	plen = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

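/*
 * Build a T3 RDMA write WQE.  For IB_WR_RDMA_WRITE_WITH_IMM the 4-byte
 * immediate data is carried in sgl[0].stag with a zero-length SGL;
 * otherwise the caller's SGL is copied verbatim.
 */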
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
			    u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = cpu_to_be32(0);
		wqe->write.num_sgle = cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen)
				return -EMSGSIZE;
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

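/*
 * Build a T3 read-request WQE.  T3 read requests support at most one
 * local SGE; IB_WR_RDMA_READ_WITH_INV additionally asks the adapter to
 * invalidate the local STAG when the read completes.
 */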
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

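/*
 * Build a fast-register WQE.  If the page list does not fit in one WQE
 * (more than T3_MAX_FASTREG_FRAG addresses), the remainder spills into
 * a second WQE built in the next work queue slot, and *wr_cnt is bumped
 * to 2 so the caller advances the write pointer accordingly.
 */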
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
	    cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
	    V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
	    V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift - 12) |
	    V_FR_TYPE(TPT_VATO) |
	    V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr + 1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
				Q_GENBIT(wq->wptr + 1, wq->size_log2),
				0, 1 + wr->wr.fast_reg.page_list_len -
				T3_MAX_FASTREG_FRAG, T3_EOP);
			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}

static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
			  u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}

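/*
 * Validate an SGL against the memory regions it references and translate
 * each SGE into a PBL index and page size for the adapter.  Any stale,
 * zero-based, or out-of-bounds reference fails the whole map.
 */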
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;

	for (i = 0; i < num_sgle; i++) {

		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
				rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

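/*
 * Build a receive WQE for non-zero-STAG SGEs.  The host supplies, per
 * SGE, the page size, the page offset, and the adapter's PBL address
 * computed by iwch_sgl2pbl_map(); unused SGE slots are zeroed.
 */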
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
			   struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}

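/*
 * Build a receive WQE for STag-0 SGEs.  The PBL memory allocated here is
 * recorded in the software RQ entry so the completion path can free it.
 */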
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry.  So we allocate the max needed PBL memory here and
	 * pass it to the uP in the recv WR.  The uP will build the PBL and
	 * setup the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size.  This and an imposed 128MB
		 * sge length limit allows us to require only a 2-entry HW
		 * PBL for each SGE.  This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
			     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}

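/*
 * Post a chain of send work requests.  Each WR is validated and encoded
 * into the next WQE slot under the QP lock, the software SQ entry is
 * filled in for completion processing, and the doorbell is rung once for
 * the whole chain.  On error, *bad_wr points at the offending WR.
 */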
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
					    &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err)
			break;
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

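/*
 * Post a chain of receive work requests.  The first SGE's lkey selects
 * between the normal and the STag-0 build path.  One RQ slot is always
 * held in reserve, hence the "- 1" on the free count.
 */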
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (wr->num_sge > T3_MAX_SGE) {
			err = -EINVAL;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs) {
			if (wr->sg_list[0].lkey)
				err = build_rdma_recv(qhp, wqe, wr);
			else
				err = build_zero_stag_recv(qhp, wqe, wr);
		} else
			err = -ENOMEM;
		if (err)
			break;

		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
		     "wqe %p\n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}

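/*
 * Post a memory-window bind as a single WQE on the SQ.  The MR backing
 * the window is run through iwch_sgl2pbl_map() so the adapter can
 * resolve the window into the underlying PBL.
 */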
int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

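/*
 * Translate a CQE error status into the RDMAP/DDP/MPA layer and error
 * code fields of a TERMINATE message.  With no response message to
 * inspect, the default is a local catastrophic RDMA error.
 */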
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

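/*
 * Post a zero-byte read.  The WQE goes out as an offloaded CPL message
 * rather than through the user work queue, using placeholder STAGs and
 * a zero local length.
 */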
int iwch_post_zb_read(struct iwch_qp *qhp)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			 V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;
	int flushed;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

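/*
 * Kernel QPs are flushed synchronously; for user QPs we can only mark
 * the WQ in error and leave the flush to the owner of the queues.
 */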
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (qhp->ibqp.uobject)
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;

	while ((count + 1) != 0 &&
	       fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}
	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}

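/*
 * Initialize the RDMA state of the connection: collect the QP, CQ, MPA,
 * and TCP parameters into a t3_rdma_init_attr and hand them to the
 * adapter via cxio_rdma_init().
 */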
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);
	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;
	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

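/*
 * QP state machine.  Attribute changes are only honored in IDLE; state
 * transitions may require flushing the queues, posting a TERMINATE, or
 * disconnecting the LLP stream, all of which are deferred until the QP
 * lock is dropped.
 */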
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct iwch_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			break;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
			if (!internal)
				terminate = 1;
			break;
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case IWCH_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case IWCH_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = IWCH_QP_STATE_IDLE;
		break;
	case IWCH_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.qpid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);
		put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

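/*
 * Quiescing pauses the connection's TID so the adapter stops scheduling
 * it; resuming restarts it.  iwch_quiesce_qps()/iwch_resume_qps() apply
 * this to every QP whose send or receive CQ matches the given CQ.
 */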
static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i = 0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}