qib_cq.c

/*
 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"
/**
 * qib_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
{
        struct qib_cq_wc *wc;
        unsigned long flags;
        u32 head;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Note that the head pointer might be writable by user processes.
         * Take care to verify it is a sane value.
         */
        wc = cq->queue;
        head = wc->head;
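        /*
         * The ring holds cqe + 1 slots, so cqe itself is the last valid
         * index; clamp any out-of-range (possibly corrupted) head value
         * and wrap to slot 0 when the top slot is in use.
         */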
        if (head >= (unsigned) cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else
                next = head + 1;
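        /* Ring full: report the overflow to the consumer as a fatal CQ error. */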
        if (unlikely(next == wc->tail)) {
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
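        /*
         * A non-NULL cq->ip means the queue is mmapped into user space,
         * so store the entry in the ABI-stable ib_uverbs_wc layout
         * field by field rather than as a kernel struct ib_wc.
         */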
        if (cq->ip) {
                wc->uqueue[head].wr_id = entry->wr_id;
                wc->uqueue[head].status = entry->status;
                wc->uqueue[head].opcode = entry->opcode;
                wc->uqueue[head].vendor_err = entry->vendor_err;
                wc->uqueue[head].byte_len = entry->byte_len;
                wc->uqueue[head].ex.imm_data =
                        (__u32 __force) entry->ex.imm_data;
                wc->uqueue[head].qp_num = entry->qp->qp_num;
                wc->uqueue[head].src_qp = entry->src_qp;
                wc->uqueue[head].wc_flags = entry->wc_flags;
                wc->uqueue[head].pkey_index = entry->pkey_index;
                wc->uqueue[head].slid = entry->slid;
                wc->uqueue[head].sl = entry->sl;
                wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
                wc->uqueue[head].port_num = entry->port_num;
                /* Make sure entry is written before the head index. */
                smp_wmb();
        } else
                wc->kqueue[head] = *entry;
        wc->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                queue_work(qib_cq_wq, &cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);
}
/**
 * qib_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct qib_cq *cq = to_icq(ibcq);
        struct qib_cq_wc *wc;
        unsigned long flags;
        int npolled;
        u32 tail;

        /* The kernel can only poll a kernel completion queue */
        if (cq->ip) {
                npolled = -EINVAL;
                goto bail;
        }

        spin_lock_irqsave(&cq->lock, flags);

        wc = cq->queue;
        tail = wc->tail;
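        /* Defensively clamp the tail index to the last valid slot. */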
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (tail == wc->head)
                        break;
                /* The kernel doesn't need a RMB since it has the lock. */
                *entry = wc->kqueue[tail];
                if (tail >= cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        wc->tail = tail;

        spin_unlock_irqrestore(&cq->lock, flags);

bail:
        return npolled;
}
static void send_complete(struct work_struct *work)
{
        struct qib_cq *cq = container_of(work, struct qib_cq, comptask);

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, queue_work()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                /*
                 * IPoIB connected mode assumes the callback is from a
                 * soft IRQ.  We simulate this by blocking "bottom halves".
                 * See the implementation for ipoib_cm_handle_tx_wc(),
                 * netif_tx_lock_bh() and netif_tx_lock().
                 */
                local_bh_disable();
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                local_bh_enable();

                if (cq->triggered == triggered)
                        return;
        }
}
/**
 * qib_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the QLogic_IB driver
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
                            int comp_vector, struct ib_ucontext *context,
                            struct ib_udata *udata)
{
        struct qib_ibdev *dev = to_idev(ibdev);
        struct qib_cq *cq;
        struct qib_cq_wc *wc;
        struct ib_cq *ret;
        u32 sz;

        if (entries < 1 || entries > ib_qib_max_cqes) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        /* Allocate the completion queue structure. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
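        /*
         * One extra slot lets head == tail mean "empty" while a full
         * ring is head + 1 == tail (modulo the ring size).
         */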
        sz = sizeof(*wc);
        if (udata && udata->outlen >= sizeof(__u64))
                sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
        else
                sz += sizeof(struct ib_wc) * (entries + 1);
        wc = vmalloc_user(sz);
        if (!wc) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_cq;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See qib_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;

                cq->ip = qib_create_mmap_info(dev, sz, context, wc);
                if (!cq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wc;
                }

                err = ib_copy_to_udata(udata, &cq->ip->offset,
                                       sizeof(cq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                cq->ip = NULL;
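        /* Enforce the per-device limit on the number of CQs. */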
        spin_lock(&dev->n_cqs_lock);
        if (dev->n_cqs_allocated == ib_qib_max_cqs) {
                spin_unlock(&dev->n_cqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }
        dev->n_cqs_allocated++;
        spin_unlock(&dev->n_cqs_lock);

        if (cq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }
        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries must be >= the number requested or we
         * return an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        INIT_WORK(&cq->comptask, send_complete);
        wc->head = 0;
        wc->tail = 0;
        cq->queue = wc;

        ret = &cq->ibcq;

        goto done;

bail_ip:
        kfree(cq->ip);
bail_wc:
        vfree(wc);
bail_cq:
        kfree(cq);
done:
        return ret;
}
/**
 * qib_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int qib_destroy_cq(struct ib_cq *ibcq)
{
        struct qib_ibdev *dev = to_idev(ibcq->device);
        struct qib_cq *cq = to_icq(ibcq);

        flush_work(&cq->comptask);
        spin_lock(&dev->n_cqs_lock);
        dev->n_cqs_allocated--;
        spin_unlock(&dev->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, qib_release_mmap_info);
        else
                vfree(cq->queue);
        kfree(cq);

        return 0;
}
/**
 * qib_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct qib_cq *cq = to_icq(ibcq);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, flags);
        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
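        /*
         * If requested, report whether completions are already pending
         * so the caller knows to poll again before sleeping.
         */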
        if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
            cq->queue->head != cq->queue->tail)
                ret = 1;

        spin_unlock_irqrestore(&cq->lock, flags);

        return ret;
}
/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the minimum number of entries for the resized queue
 * @udata: user data for libibverbs.so
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct qib_cq *cq = to_icq(ibcq);
        struct qib_cq_wc *old_wc;
        struct qib_cq_wc *wc;
        u32 head, tail, n;
        int ret;
        u32 sz;

        if (cqe < 1 || cqe > ib_qib_max_cqes) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of
         * entries.
         */
        sz = sizeof(*wc);
        if (udata && udata->outlen >= sizeof(__u64))
                sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
        else
                sz += sizeof(struct ib_wc) * (cqe + 1);
        wc = vmalloc_user(sz);
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }
        /* Check that we can write the offset to mmap. */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = 0;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail_free;
        }
        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        old_wc = cq->queue;
        head = old_wc->head;
        if (head > (u32) cq->ibcq.cqe)
                head = (u32) cq->ibcq.cqe;
        tail = old_wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
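        /*
         * Count the entries currently in the old ring (accounting for
         * wraparound); the new queue must be able to hold all of them.
         */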
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                ret = -EINVAL;
                goto bail_unlock;
        }
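        /* Copy the pending entries, in order, to the start of the new ring. */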
        for (n = 0; tail != head; n++) {
                if (cq->ip)
                        wc->uqueue[n] = old_wc->uqueue[tail];
                else
                        wc->kqueue[n] = old_wc->kqueue[tail];
                if (tail == (u32) cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        wc->head = n;
        wc->tail = 0;
        cq->queue = wc;
        spin_unlock_irq(&cq->lock);

        vfree(old_wc);

        if (cq->ip) {
                struct qib_ibdev *dev = to_idev(ibcq->device);
                struct qib_mmap_info *ip = cq->ip;

                qib_update_mmap_info(dev, ip, sz, wc);

                /*
                 * Return the offset to mmap.
                 * See qib_mmap() for details.
                 */
                if (udata && udata->outlen >= sizeof(__u64)) {
                        ret = ib_copy_to_udata(udata, &ip->offset,
                                               sizeof(ip->offset));
                        if (ret)
                                goto bail;
                }

                spin_lock_irq(&dev->pending_lock);
                if (list_empty(&ip->pending_mmaps))
                        list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = 0;
        goto bail;

bail_unlock:
        spin_unlock_irq(&cq->lock);
bail_free:
        vfree(wc);
bail:
        return ret;
}