ipath_cq.c

/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	wc->queue[head].wr_id = entry->wr_id;
	wc->queue[head].status = entry->status;
	wc->queue[head].opcode = entry->opcode;
	wc->queue[head].vendor_err = entry->vendor_err;
	wc->queue[head].byte_len = entry->byte_len;
	wc->queue[head].imm_data = (__u32 __force) entry->imm_data;
	wc->queue[head].qp_num = entry->qp->qp_num;
	wc->queue[head].src_qp = entry->src_qp;
	wc->queue[head].wc_flags = entry->wc_flags;
	wc->queue[head].pkey_index = entry->pkey_index;
	wc->queue[head].slid = entry->slid;
	wc->queue[head].sl = entry->sl;
	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
	wc->queue[head].port_num = entry->port_num;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
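
/*
 * Editor's worked example of the ring arithmetic above (not driver
 * code): with cq->ibcq.cqe == 3 the queue array has cqe + 1 == 4
 * slots, indices 0..3.  head == tail means empty; the entry is
 * dropped (and IB_EVENT_CQ_ERR raised) when next == tail, i.e. when
 * cqe entries are already queued.  The spare slot is what lets a CQ
 * sized for cqe entries actually hold cqe entries:
 *
 *	head == 2, tail == 3  ->  next == 3 == tail: full, 3 entries
 *	head == 3, tail == 0  ->  next wraps to 0 == tail: full
 *	head == 3, tail == 3  ->  empty
 */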

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;

	/* The tail pointer might be user writable; make sure it is sane. */
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		struct ipath_qp *qp;

		if (tail == wc->head)
			break;
		/*
		 * The lookup takes a reference on the QP.  It can return
		 * NULL if the QP was destroyed after this entry was
		 * queued, so guard the dereference.
		 */
		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
				      wc->queue[tail].qp_num);
		entry->qp = qp ? &qp->ibqp : NULL;
		if (qp && atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
		entry->wr_id = wc->queue[tail].wr_id;
		entry->status = wc->queue[tail].status;
		entry->opcode = wc->queue[tail].opcode;
		entry->vendor_err = wc->queue[tail].vendor_err;
		entry->byte_len = wc->queue[tail].byte_len;
		entry->imm_data = wc->queue[tail].imm_data;
		entry->src_qp = wc->queue[tail].src_qp;
		entry->wc_flags = wc->queue[tail].wc_flags;
		entry->pkey_index = wc->queue[tail].pkey_index;
		entry->slid = wc->queue[tail].slid;
		entry->sl = wc->queue[tail].sl;
		entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
		entry->port_num = wc->queue[tail].port_num;
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
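
/*
 * Editor's sketch of a typical kernel consumer of this entry point,
 * via the generic verbs API (my_cq and handle_completion are
 * illustrative names, not part of this driver):
 *
 *	struct ib_wc wcs[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(my_cq, ARRAY_SIZE(wcs), wcs)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wcs[i]);
 */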

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}
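
/*
 * Editor's illustration of the interleaving the loop above handles:
 *
 *	send_complete()                ipath_cq_enter()
 *	triggered = cq->triggered
 *	comp_handler() running...
 *	                               cq->triggered++
 *	                               tasklet_hi_schedule() (cannot
 *	                               re-enter until we return)
 *	cq->triggered != triggered, so call the handler again now
 *	rather than waiting for the tasklet to be rescheduled.
 */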

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: the user context which may mmap the CQ ring
 * @udata: used to return the mmap offset of the CQ ring to user level
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector, struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(*wc) + sizeof(struct ib_wc) * entries;

		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;
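
	/*
	 * Editor's sketch of the user-level side of the offset handed
	 * back through udata above (names illustrative; the real code
	 * lives in the userspace verbs library, not in this driver):
	 *
	 *	void *ring = mmap(NULL, size, PROT_READ | PROT_WRITE,
	 *			  MAP_SHARED, cmd_fd, resp.offset);
	 *
	 * ipath_mmap() matches resp.offset against the ipath_mmap_info
	 * created here and maps the vmalloc'ed CQ ring into the process.
	 */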

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}
	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or
	 * return an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
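
/*
 * Editor's sketch of how a kernel ULP reaches the entry point above
 * through the generic verbs layer (handler names are illustrative):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, my_event_handler,
 *			  my_context, 128, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */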

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success, or 1 if IB_CQ_REPORT_MISSED_EVENTS was
 * requested and entries are pending on the completion queue.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
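
/*
 * Editor's sketch of the race-free rearm pattern the return value
 * above supports (my_cq and process_cq are illustrative):
 *
 *	process_cq(my_cq);
 *	if (ib_req_notify_cq(my_cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		process_cq(my_cq);  (entries slipped in while rearming)
 */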

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new minimum number of entries
 * @udata: used to return the mmap offset of the resized ring, if any
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = (__u64) wc;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;	/* don't leak the new queue */
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32) cqe < n)) {
		/* The new queue is too small to hold the pending entries. */
		spin_unlock_irq(&cq->lock);
		ret = -EOVERFLOW;
		goto bail_free;
	}
	for (n = 0; tail != head; n++) {
		wc->queue[n] = old_wc->queue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;
		u32 s = sizeof(*wc) + sizeof(struct ib_wc) * cqe;

		ipath_update_mmap_info(dev, ip, s, wc);
		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_free:
	vfree(wc);
bail:
	return ret;
}
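
/*
 * Editor's worked example of the occupancy check in ipath_resize_cq():
 * with cq->ibcq.cqe == 7 (an 8-slot ring), head == 2 and tail == 5,
 * head < tail so n = 7 + 1 + 2 - 5 = 5 entries are pending.  Resizing
 * to cqe == 4 would then fail with -EOVERFLOW, while cqe == 5 succeeds
 * and the copy loop lands the 5 entries at indices 0..4 of the new
 * queue, leaving head == 5 and tail == 0.
 */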