ipath_verbs_mcast.c

/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>
#include <linux/sched.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);
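
/*
 * Lock ordering (as established by the code below): mcast_lock is taken
 * first, and dev->n_mcast_grps_lock nests inside it (see
 * ipath_mcast_add()).  Readers of a group's qp_list walk it with the RCU
 * list primitives while holding a reference on the group, so detach can
 * wait for the refcount to drain before freeing anything.
 */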
/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
	struct ipath_mcast_qp *mqp;

	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
	struct ipath_qp *qp = mqp->qp;

	/* Notify ipath_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}
/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
	struct ipath_mcast *mcast;

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
	struct ipath_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		ipath_mcast_qp_free(p);

	kfree(mcast);
}
/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct ipath_mcast *mcast;

	spin_lock_irqsave(&mcast_lock, flags);
	n = mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct ipath_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&mcast_lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&mcast_lock, flags);

	mcast = NULL;

bail:
	return mcast;
}
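
/*
 * Illustrative usage sketch (modeled on the driver's multicast receive
 * path; not part of this file): look the group up, deliver to each
 * attached QP, then drop the reference taken by ipath_mcast_find() so a
 * concurrent detach can make progress.  hdr_dgid and deliver() are
 * hypothetical names used only for the example:
 *
 *	struct ipath_mcast *mcast = ipath_mcast_find(&hdr_dgid);
 *	struct ipath_mcast_qp *p;
 *
 *	if (mcast) {
 *		list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *			deliver(p->qp);		// hypothetical helper
 *		if (atomic_dec_return(&mcast->refcount) <= 1)
 *			wake_up(&mcast->wait);
 *	}
 */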
/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device on which the attach is performed
 * @mcast: the mcast group to insert if the GID is not already in the table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if the limit on
 * mcast groups or on QPs attached to a group would be exceeded.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
			   struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&mcast_lock);

	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&mcast_lock);

	return ret;
}
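
/*
 * Note: ipath_mcast_add() returns positive ESRCH/EEXIST/ENOMEM values as
 * internal status codes; ipath_multicast_attach() below maps them onto the
 * results (0, -EINVAL, -ENOMEM) expected by the verbs consumer.
 */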
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast;
	struct ipath_mcast_qp *mqp;
	int ret;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = ipath_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = ipath_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}

	switch (ipath_mcast_add(dev, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: can't attach the same QP twice. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -EINVAL;
		goto bail;

	case EEXIST:		/* The mcast wasn't used */
		ipath_mcast_free(mcast);
		break;

	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;

	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}
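
/*
 * Illustrative usage sketch (assumed consumer side; not part of this
 * file): these routines are reached through the core verbs layer rather
 * than being called directly, e.g.:
 *
 *	ret = ib_attach_mcast(ibqp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(ibqp, &mgid, mlid);
 */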
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast = NULL;
	struct ipath_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret;

	spin_lock_irq(&mcast_lock);

	/* Find the GID in the mcast table. */
	n = mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&mcast_lock);
			ret = -EINVAL;
			goto bail;
		}

		mcast = rb_entry(n, struct ipath_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&mcast_lock);

	/*
	 * The QP was not attached to this group.  The loop cursor p is
	 * never NULL after list_for_each_entry_safe(), so delp is tracked
	 * explicitly to make this check valid.
	 */
	if (!delp) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	ipath_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		ipath_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}

	ret = 0;

bail:
	return ret;
}
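
/*
 * Used by the driver teardown path (see ipath_verbs.c) as a sanity check
 * that every multicast group was detached before the device is
 * unregistered.
 */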
int ipath_mcast_tree_empty(void)
{
	return mcast_tree.rb_node == NULL;
}