cq.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

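/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR RESET work request to the
 * firmware, wait for its reply, then free the software queue, the
 * DMA-coherent queue memory and the CQ id.
 */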
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
		if (!wr_wait.done) {
			printk(KERN_ERR MOD "Device %s not responding!\n",
			       pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			ret = -EIO;
		} else
			ret = wr_wait.ret;
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

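/*
 * Allocate a CQ id and queue memory, then post a FW_RI_RES_WR WRITE work
 * request so the firmware binds the hardware CQ to that memory.  Kernel CQs
 * also get a software shadow queue for flushed and out-of-order completions.
 */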
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			   (cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

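/*
 * Synthesize a flush-status RQ completion in the software CQ for one
 * outstanding receive WR.
 */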
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->rq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

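/*
 * Flush the in-use RQ entries that have no hardware CQE (skipping the
 * 'count' already accounted for) into the software CQ.  Returns the number
 * of flush CQEs inserted.
 */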
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

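/*
 * Synthesize a flush-status SQ completion in the software CQ for the given
 * software SQ entry.
 */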
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

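/*
 * Flush the in-use SQ entries, starting just past the 'count' entries
 * already completed by hardware, into the software CQ.  Returns the number
 * of flush CQEs inserted.
 */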
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
	int in_use = wq->sq.in_use - count;

	BUG_ON(in_use < 0);
	while (in_use--) {
		swsqe->signaled = 0;
		insert_sq_cqe(wq, cq, swsqe);
		swsqe++;
		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
			swsqe = wq->sq.sw_sq;
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe = NULL, *swcqe;
	int ret;

	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
	while (!ret) {
		PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
		     __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		t4_swcq_produce(cq);
		t4_hwcq_consume(cq);
		ret = t4_next_hw_cqe(cq, &cqe);
	}
}

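/*
 * Return 1 if this CQE completes a WR the consumer posted, 0 if it should
 * be discarded: terminate CQEs, peer RDMA writes seen on the RQ, read
 * responses seen on the SQ, and sends arriving with no RQE outstanding.
 */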
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

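/*
 * Count the SQ-related CQEs for this QP that are currently sitting in the
 * software CQ.
 */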
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
				      wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

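/*
 * Count the RQ-related CQEs for this QP that are currently sitting in the
 * software CQ and still complete a posted receive WR.
 */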
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

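/*
 * Walk the software SQ from the current cidx and move any completed
 * out-of-order CQEs (stashed there by poll_cq) into the software CQ,
 * reaping the unsignaled WRs that precede them.
 */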
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;
	int unsignaled = 0;

	swsqe = &wq->sq.sw_sq[ptr];
	while (count--)
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
				ptr = 0;
			swsqe = &wq->sq.sw_sq[ptr];
			unsignaled++;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->signaled = 0;
			wq->sq.in_use -= unsignaled;
			break;
		} else
			break;
}

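/*
 * Build a local SQ-type READ_REQ completion from an RQ-type READ_RESP
 * hardware CQE, taking the index and length from the oldest outstanding
 * read WR.
 */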
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read WR in the SWSQ, or set it to
 * NULL if there is none.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0		CQE returned ok.
 *     -EAGAIN		CQE skipped, try again.
 *     -EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);
	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

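/*
 * ib_poll_cq entry point: poll up to num_entries completions off the CQ,
 * retrying entries that poll_cq asks to skip (-EAGAIN).
 */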
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

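/*
 * ib_destroy_cq entry point: unhook the CQ from the cqid idr, wait for all
 * references to drop, destroy the hardware CQ and free the chp.
 */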
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

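/*
 * ib_create_cq entry point: size the hardware CQ (status page, full/empty
 * marker, multiple-of-16 and minimum-depth constraints), create it, and for
 * user CQs hand back mmap keys for the queue memory and the GTS doorbell.
 */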
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;
	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

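/* CQ resize is not supported. */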
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

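/*
 * ib_req_notify_cq entry point: arm the CQ for the next solicited or any
 * completion, and suppress the return value unless the consumer asked for
 * missed-event reporting.
 */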
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}