ipath_cq.c

/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with either the qp->s_lock or the qp->r_rq.lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	unsigned long flags;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->head == cq->ibcq.cqe)
		next = 0;
	else
		next = cq->head + 1;
	if (unlikely(next == cq->tail)) {
		/* The queue is full; report an overflow to the consumer. */
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	cq->queue[cq->head] = *entry;
	cq->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
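/*
 * Note on the ring above: cq->queue has ibcq.cqe + 1 slots and one slot
 * is always left unused, so head == tail means "empty" while
 * next == tail means "full".  For example, with ibcq.cqe == 3 the array
 * has 4 slots and at most 3 completions can be pending at once.
 */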
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (cq->tail == cq->head)
			break;	/* the queue is empty */
		*entry = cq->queue[cq->tail];
		/* Advance the tail, wrapping at the end of the array. */
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
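/*
 * A minimal sketch of how a consumer would drain the queue through the
 * generic verbs layer, which dispatches to ipath_poll_cq() above
 * (handle_wc() is a hypothetical callback, not part of this driver):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, 8, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */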
static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ib_wc *wc;
	struct ib_cq *ret;

	if (entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc(sizeof(*wc) * (entries + 1));
	if (!wc) {
		kfree(cq);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries must be >= the number requested, or we
	 * return an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	cq->head = 0;
	cq->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	dev->n_cqs_allocated++;

bail:
	return ret;
}
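/*
 * A minimal sketch of reaching this function through the generic verbs
 * API of this era, assuming the five-argument ib_create_cq() of
 * contemporaneous kernels (my_comp_handler and my_ctx are hypothetical):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_ctx, 128);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */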
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	/* Make sure the completion tasklet is not running before freeing. */
	tasklet_kill(&cq->comptask);
	dev->n_cqs_allocated--;
	vfree(cq->queue);
	kfree(cq);

	return 0;
}
/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions.
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify;

	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}
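/*
 * A sketch of the usual rearm-then-repoll pattern a consumer would use
 * with this entry point, so a completion that arrives between the last
 * poll and the rearm is not missed (wc and handle_wc() are
 * hypothetical):
 *
 *	ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *		handle_wc(&wc);
 */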
/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue to resize
 * @cqe: the new minimum size of the completion queue
 * @udata: unused by the InfiniPath driver
 *
 * Returns 0 for success.
 *
 * Called by ib_resize_cq() in the generic verbs code.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ib_wc *wc, *old_wc;
	u32 n;
	int ret;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	wc = vmalloc(sizeof(*wc) * (cqe + 1));
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	spin_lock_irq(&cq->lock);
	/* Count the entries currently queued, allowing for wraparound. */
	if (cq->head < cq->tail)
		n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
	else
		n = cq->head - cq->tail;
	if (unlikely((u32)cqe < n)) {
		/* The new queue is too small to hold the pending entries. */
		spin_unlock_irq(&cq->lock);
		vfree(wc);
		ret = -EOVERFLOW;
		goto bail;
	}
	/* Copy the pending entries to the start of the new queue. */
	for (n = 0; cq->tail != cq->head; n++) {
		wc[n] = cq->queue[cq->tail];
		if (cq->tail == cq->ibcq.cqe)
			cq->tail = 0;
		else
			cq->tail++;
	}
	cq->ibcq.cqe = cqe;
	cq->head = n;
	cq->tail = 0;
	old_wc = cq->queue;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	ret = 0;

bail:
	return ret;
}
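/*
 * Worked example of the occupancy count above: with ibcq.cqe == 7 (an
 * 8-slot array), head == 2 and tail == 6, the queue has wrapped, so
 * n = 7 + 1 + 2 - 6 = 4 pending completions.  A caller resizes through
 * the generic entry point:
 *
 *	ret = ib_resize_cq(ibcq, 256);
 */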