c2_cq.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
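
/*
 * Look up the CQ that owns message queue index @cqn and take a
 * reference on it.  Returns NULL if the CQ has already been destroyed.
 */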
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
        struct c2_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);
        cq = c2dev->qptr_array[cqn];
        if (!cq) {
                spin_unlock_irqrestore(&c2dev->lock, flags);
                return NULL;
        }
        atomic_inc(&cq->refcount);
        spin_unlock_irqrestore(&c2dev->lock, flags);
        return cq;
}
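
/*
 * Drop a reference taken by c2_cq_get().  The final put wakes up
 * anyone sleeping in c2_free_cq() waiting for the CQ to go idle.
 */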
static void c2_cq_put(struct c2_cq *cq)
{
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}
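
/*
 * Completion event handler: called when the adapter reports new
 * completions on the CQ backed by @mq_index.  Dispatches the
 * consumer's completion handler, discarding events for a CQ that
 * has already been destroyed.
 */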
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
        struct c2_cq *cq;

        cq = c2_cq_get(c2dev, mq_index);
        if (!cq) {
                printk("discarding events on destroyed CQN=%d\n", mq_index);
                return;
        }

        (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
        c2_cq_put(cq);
}
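
/*
 * Scrub pending completions belonging to a QP that is being destroyed:
 * any CQE in the message queue whose qp_user_context points at @qp has
 * that context cleared, so c2_poll_one() will skip it later.
 */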
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
        struct c2_cq *cq;
        struct c2_mq *q;

        cq = c2_cq_get(c2dev, mq_index);
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        q = &cq->mq;
        if (q && !c2_mq_empty(q)) {
                u16 priv = q->priv;
                struct c2wr_ce *msg;

                while (priv != be16_to_cpu(*q->shared)) {
                        msg = (struct c2wr_ce *)
                                (q->msg_pool.host + priv * q->msg_size);
                        if (msg->qp_user_context == (u64) (unsigned long) qp) {
                                msg->qp_user_context = (u64) 0;
                        }
                        priv = (priv + 1) % q->q_size;
                }
        }
        spin_unlock_irq(&cq->lock);
        c2_cq_put(cq);
}
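
/*
 * Translate an adapter CQE status code into the corresponding
 * IB work completion status.
 */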
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
        switch (status) {
        case C2_OK:
                return IB_WC_SUCCESS;
        case CCERR_FLUSHED:
                return IB_WC_WR_FLUSH_ERR;
        case CCERR_BASE_AND_BOUNDS_VIOLATION:
                return IB_WC_LOC_PROT_ERR;
        case CCERR_ACCESS_VIOLATION:
                return IB_WC_LOC_ACCESS_ERR;
        case CCERR_TOTAL_LENGTH_TOO_BIG:
                return IB_WC_LOC_LEN_ERR;
        case CCERR_INVALID_WINDOW:
                return IB_WC_MW_BIND_ERR;
        default:
                return IB_WC_GENERAL_ERR;
        }
}
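
/*
 * Pull a single completion off the CQ's message queue and translate it
 * into an ib_wc entry.  Returns -EAGAIN when no completion is available.
 */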
static inline int c2_poll_one(struct c2_dev *c2dev,
                              struct c2_cq *cq, struct ib_wc *entry)
{
        struct c2wr_ce *ce;
        struct c2_qp *qp;
        int is_recv = 0;

        ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
        if (!ce) {
                return -EAGAIN;
        }

        /*
         * If the qp returned is null then this qp has already
         * been freed and we are unable to process the completion.
         * Try pulling the next message.
         */
        while ((qp =
                (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
                c2_mq_free(&cq->mq);
                ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
                if (!ce)
                        return -EAGAIN;
        }

        entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
        entry->wr_id = ce->hdr.context;
        entry->qp = &qp->ibqp;
        entry->wc_flags = 0;
        entry->slid = 0;
        entry->sl = 0;
        entry->src_qp = 0;
        entry->dlid_path_bits = 0;
        entry->pkey_index = 0;

        switch (c2_wr_get_id(ce)) {
        case C2_WR_TYPE_SEND:
                entry->opcode = IB_WC_SEND;
                break;
        case C2_WR_TYPE_RDMA_WRITE:
                entry->opcode = IB_WC_RDMA_WRITE;
                break;
        case C2_WR_TYPE_RDMA_READ:
                entry->opcode = IB_WC_RDMA_READ;
                break;
        case C2_WR_TYPE_BIND_MW:
                entry->opcode = IB_WC_BIND_MW;
                break;
        case C2_WR_TYPE_RECV:
                entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
                entry->opcode = IB_WC_RECV;
                is_recv = 1;
                break;
        default:
                break;
        }

        /* consume the WQEs */
        if (is_recv)
                c2_mq_lconsume(&qp->rq_mq, 1);
        else
                c2_mq_lconsume(&qp->sq_mq,
                               be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

        /* free the message */
        c2_mq_free(&cq->mq);

        return 0;
}
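
/*
 * ib_poll_cq() entry point: poll up to @num_entries completions into
 * the caller's ib_wc array and return how many were filled in.
 */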
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct c2_dev *c2dev = to_c2dev(ibcq->device);
        struct c2_cq *cq = to_c2cq(ibcq);
        unsigned long flags;
        int npolled, err;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = c2_poll_one(c2dev, cq, entry + npolled);
                if (err)
                        break;
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
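
/*
 * ib_req_notify_cq() entry point: arm the CQ for the requested
 * notification type by writing the adapter's shared CQ state.
 */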
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct c2_mq_shared __iomem *shared;
        struct c2_cq *cq;
        unsigned long flags;
        int ret = 0;

        cq = to_c2cq(ibcq);
        shared = cq->mq.peer;

        if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
                writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
        else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
        else
                return -EINVAL;

        writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

        /*
         * Now read back shared->armed to make the PCI
         * write synchronous.  This is necessary for
         * correct cq notification semantics.
         */
        readb(&shared->armed);

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                spin_lock_irqsave(&cq->lock, flags);
                ret = !c2_mq_empty(&cq->mq);
                spin_unlock_irqrestore(&cq->lock, flags);
        }

        return ret;
}
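
/*
 * Allocate and free the DMA-coherent buffer that backs a CQ's message
 * pool (q_size messages of msg_size bytes each).
 */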
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
        dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
                          mq->msg_pool.host, pci_unmap_addr(mq, mapping));
}
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
                           int msg_size)
{
        u8 *pool_start;

        pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
                                        &mq->host_dma, GFP_KERNEL);
        if (!pool_start)
                return -ENOMEM;

        c2_mq_rep_init(mq,
                       0,       /* index (currently unknown) */
                       q_size,
                       msg_size,
                       pool_start,
                       NULL,    /* peer (currently unknown) */
                       C2_MQ_HOST_TARGET);

        pci_unmap_addr_set(mq, mapping, mq->host_dma);

        return 0;
}
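
/*
 * Create a completion queue: allocate the host-side message queue,
 * issue a CCWR_CQ_CREATE verb to the adapter, and map the adapter's
 * shared CQ state so the CQ can be armed and polled.
 */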
int c2_init_cq(struct c2_dev *c2dev, int entries,
               struct c2_ucontext *ctx, struct c2_cq *cq)
{
        struct c2wr_cq_create_req wr;
        struct c2wr_cq_create_rep *reply;
        unsigned long peer_pa;
        struct c2_vq_req *vq_req;
        int err;

        might_sleep();

        cq->ibcq.cqe = entries - 1;
        cq->is_kernel = !ctx;

        /* Allocate a shared pointer */
        cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                      &cq->mq.shared_dma, GFP_KERNEL);
        if (!cq->mq.shared)
                return -ENOMEM;

        /* Allocate pages for the message pool */
        err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
        if (err)
                goto bail0;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail1;
        }

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_CQ_CREATE);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.msg_size = cpu_to_be32(cq->mq.msg_size);
        wr.depth = cpu_to_be32(cq->mq.q_size);
        wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
        wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
        wr.user_context = (u64) (unsigned long) (cq);

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) & wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail2;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail2;

        reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail2;
        }

        if ((err = c2_errno(reply)) != 0)
                goto bail3;

        cq->adapter_handle = reply->cq_handle;
        cq->mq.index = be32_to_cpu(reply->mq_index);

        peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
        cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
        if (!cq->mq.peer) {
                err = -ENOMEM;
                goto bail3;
        }

        vq_repbuf_free(c2dev, reply);
        vq_req_free(c2dev, vq_req);

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);

        /*
         * Use the MQ index allocated by the adapter to
         * store the CQ in the qptr_array
         */
        cq->cqn = cq->mq.index;
        c2dev->qptr_array[cq->cqn] = cq;

        return 0;

bail3:
        vq_repbuf_free(c2dev, reply);
bail2:
        vq_req_free(c2dev, vq_req);
bail1:
        c2_free_cq_buf(c2dev, &cq->mq);
bail0:
        c2_free_mqsp(cq->mq.shared);

        return err;
}
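
/*
 * Destroy a completion queue: unhook it from qptr_array, wait for all
 * outstanding references to drop, tell the adapter to destroy its side
 * of the CQ, and release the host-side message pool.
 */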
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
        int err;
        struct c2_vq_req *vq_req;
        struct c2wr_cq_destroy_req wr;
        struct c2wr_cq_destroy_rep *reply;

        might_sleep();

        /* Clear CQ from the qptr array */
        spin_lock_irq(&c2dev->lock);
        c2dev->qptr_array[cq->mq.index] = NULL;
        atomic_dec(&cq->refcount);
        spin_unlock_irq(&c2dev->lock);

        wait_event(cq->wait, !atomic_read(&cq->refcount));

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                goto bail0;
        }

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.cq_handle = cq->adapter_handle;

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) & wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
        if (reply)
                vq_repbuf_free(c2dev, reply);

bail1:
        vq_req_free(c2dev, vq_req);
bail0:
        if (cq->is_kernel) {
                c2_free_cq_buf(c2dev, &cq->mq);
        }

        return;
}