/* ipath_cq.c - InfiniPath completion queue support */
  1. /*
  2. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/err.h>
  33. #include <linux/vmalloc.h>
  34. #include "ipath_verbs.h"
/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with one of the qp->s_lock or qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	unsigned long flags;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Compute the slot following head; the ring holds ibcq.cqe + 1
	 * entries, so head wraps back to 0 after index ibcq.cqe.
	 */
	if (cq->head == cq->ibcq.cqe)
		next = 0;
	else
		next = cq->head + 1;
	if (unlikely(next == cq->tail)) {
		/*
		 * Ring is full: drop the entry and report a CQ error
		 * event to the consumer, if it registered a handler.
		 */
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	cq->queue[cq->head] = *entry;
	cq->head = next;

	/*
	 * Fire the consumer's completion callback if it armed
	 * notification for this kind of entry (every completion, or
	 * solicited completions only).
	 */
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	/* Track failed completions in the per-device statistics. */
	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
  79. /**
  80. * ipath_poll_cq - poll for work completion entries
  81. * @ibcq: the completion queue to poll
  82. * @num_entries: the maximum number of entries to return
  83. * @entry: pointer to array where work completions are placed
  84. *
  85. * Returns the number of completion entries polled.
  86. *
  87. * This may be called from interrupt context. Also called by ib_poll_cq()
  88. * in the generic verbs code.
  89. */
  90. int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
  91. {
  92. struct ipath_cq *cq = to_icq(ibcq);
  93. unsigned long flags;
  94. int npolled;
  95. spin_lock_irqsave(&cq->lock, flags);
  96. for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
  97. if (cq->tail == cq->head)
  98. break;
  99. *entry = cq->queue[cq->tail];
  100. if (cq->tail == cq->ibcq.cqe)
  101. cq->tail = 0;
  102. else
  103. cq->tail++;
  104. }
  105. spin_unlock_irqrestore(&cq->lock, flags);
  106. return npolled;
  107. }
  108. static void send_complete(unsigned long data)
  109. {
  110. struct ipath_cq *cq = (struct ipath_cq *)data;
  111. /*
  112. * The completion handler will most likely rearm the notification
  113. * and poll for all pending entries. If a new completion entry
  114. * is added while we are in this routine, tasklet_hi_schedule()
  115. * won't call us again until we return so we check triggered to
  116. * see if we need to call the handler again.
  117. */
  118. for (;;) {
  119. u8 triggered = cq->triggered;
  120. cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
  121. if (cq->triggered == triggered)
  122. return;
  123. }
  124. }
  125. /**
  126. * ipath_create_cq - create a completion queue
  127. * @ibdev: the device this completion queue is attached to
  128. * @entries: the minimum size of the completion queue
  129. * @context: unused by the InfiniPath driver
  130. * @udata: unused by the InfiniPath driver
  131. *
  132. * Returns a pointer to the completion queue or negative errno values
  133. * for failure.
  134. *
  135. * Called by ib_create_cq() in the generic verbs code.
  136. */
  137. struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
  138. struct ib_ucontext *context,
  139. struct ib_udata *udata)
  140. {
  141. struct ipath_cq *cq;
  142. struct ib_wc *wc;
  143. struct ib_cq *ret;
  144. /*
  145. * Need to use vmalloc() if we want to support large #s of
  146. * entries.
  147. */
  148. cq = kmalloc(sizeof(*cq), GFP_KERNEL);
  149. if (!cq) {
  150. ret = ERR_PTR(-ENOMEM);
  151. goto bail;
  152. }
  153. /*
  154. * Need to use vmalloc() if we want to support large #s of entries.
  155. */
  156. wc = vmalloc(sizeof(*wc) * (entries + 1));
  157. if (!wc) {
  158. kfree(cq);
  159. ret = ERR_PTR(-ENOMEM);
  160. goto bail;
  161. }
  162. /*
  163. * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
  164. * The number of entries should be >= the number requested or return
  165. * an error.
  166. */
  167. cq->ibcq.cqe = entries;
  168. cq->notify = IB_CQ_NONE;
  169. cq->triggered = 0;
  170. spin_lock_init(&cq->lock);
  171. tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
  172. cq->head = 0;
  173. cq->tail = 0;
  174. cq->queue = wc;
  175. ret = &cq->ibcq;
  176. bail:
  177. return ret;
  178. }
  179. /**
  180. * ipath_destroy_cq - destroy a completion queue
  181. * @ibcq: the completion queue to destroy.
  182. *
  183. * Returns 0 for success.
  184. *
  185. * Called by ib_destroy_cq() in the generic verbs code.
  186. */
  187. int ipath_destroy_cq(struct ib_cq *ibcq)
  188. {
  189. struct ipath_cq *cq = to_icq(ibcq);
  190. tasklet_kill(&cq->comptask);
  191. vfree(cq->queue);
  192. kfree(cq);
  193. return 0;
  194. }
  195. /**
  196. * ipath_req_notify_cq - change the notification type for a completion queue
  197. * @ibcq: the completion queue
  198. * @notify: the type of notification to request
  199. *
  200. * Returns 0 for success.
  201. *
  202. * This may be called from interrupt context. Also called by
  203. * ib_req_notify_cq() in the generic verbs code.
  204. */
  205. int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
  206. {
  207. struct ipath_cq *cq = to_icq(ibcq);
  208. unsigned long flags;
  209. spin_lock_irqsave(&cq->lock, flags);
  210. /*
  211. * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
  212. * any other transitions.
  213. */
  214. if (cq->notify != IB_CQ_NEXT_COMP)
  215. cq->notify = notify;
  216. spin_unlock_irqrestore(&cq->lock, flags);
  217. return 0;
  218. }
  219. int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
  220. {
  221. struct ipath_cq *cq = to_icq(ibcq);
  222. struct ib_wc *wc, *old_wc;
  223. u32 n;
  224. int ret;
  225. /*
  226. * Need to use vmalloc() if we want to support large #s of entries.
  227. */
  228. wc = vmalloc(sizeof(*wc) * (cqe + 1));
  229. if (!wc) {
  230. ret = -ENOMEM;
  231. goto bail;
  232. }
  233. spin_lock_irq(&cq->lock);
  234. if (cq->head < cq->tail)
  235. n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
  236. else
  237. n = cq->head - cq->tail;
  238. if (unlikely((u32)cqe < n)) {
  239. spin_unlock_irq(&cq->lock);
  240. vfree(wc);
  241. ret = -EOVERFLOW;
  242. goto bail;
  243. }
  244. for (n = 0; cq->tail != cq->head; n++) {
  245. wc[n] = cq->queue[cq->tail];
  246. if (cq->tail == cq->ibcq.cqe)
  247. cq->tail = 0;
  248. else
  249. cq->tail++;
  250. }
  251. cq->ibcq.cqe = cqe;
  252. cq->head = n;
  253. cq->tail = 0;
  254. old_wc = cq->queue;
  255. cq->queue = wc;
  256. spin_unlock_irq(&cq->lock);
  257. vfree(old_wc);
  258. ret = 0;
  259. bail:
  260. return ret;
  261. }